# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""
This file contains tests for exporting TransformerEngine models to ONNX.

The purpose of these tests is validation that TE models are converted to their correct ONNX
representation. Toward this end, each test captures the output of a TE module forward pass,
converts the TE module to ONNX, and uses ONNX Runtime (ORT) to execute the ONNX graph and
validate the output against TE's output.

Until FP8 is introduced to the ONNX standard, FP8 QuantizeLinear/DequantizeLinear is implemented
using custom ORT operations.
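
A typical test follows this flow (illustrative sketch using the helpers defined below;
the module and shapes chosen here are arbitrary):

    inp = torch.randn(256, 64, device="cuda")
    model = te.Linear(64, 256).cuda()
    do_export(model, inp, "te.linear.onnx", use_fp8=True)     # trace and save the ONNX graph
    validate_result("te.linear.onnx", inp, model, atol=1e-3, is_fp8=True)  # compare ORT vs. TE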

To run a test many times in a loop, use pytest-loop:
    $ python3 -m pip install pytest-loop
    $ pytest --loop 1000 tests/pytorch/test_onnx_export.py::test_export_layernorm

For reproducibility use: torch.manual_seed(0)
"""


import os
import tempfile
import pytest
import warnings
import numpy as np
import onnxruntime as ort
import torch
from torch import nn as nn
from typing import Union, Tuple, List
import transformer_engine.pytorch as te
from transformer_engine.common import recipe
import transformer_engine_extensions as tex
from transformer_engine.pytorch.cpp_extensions import gemm, fp8_gemm, fp8_gelu, cast_to_fp8, cast_from_fp8
from transformer_engine.pytorch.module.base import get_workspace
import transformer_engine.pytorch.cpp_extensions as texcpp
import transformer_engine.pytorch.softmax as softmax_defs
from transformer_engine.pytorch.utils import get_default_init_method
from transformer_engine.pytorch.export import is_in_onnx_export_mode
from transformer_engine.pytorch.fp8 import is_fp8_available

# Global test configuration knobs.

# Enable this to serialize test inputs and outputs to file (as a Polygraphy RunResults instance).
SAVE_TEST_IO = bool(int(os.getenv("NVTE_ONNX_EXPORT_SAVE_TEST_IO", "0")))

if SAVE_TEST_IO:
    from polygraphy.json import save_json
    from polygraphy.comparator import RunResults
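
# Example (illustrative): NVTE_ONNX_EXPORT_SAVE_TEST_IO=1 pytest tests/pytorch/test_onnx_export.py
# dumps each test's inputs/outputs as Polygraphy JSON files alongside the generated ONNX models.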

# The directory where generated ONNX test models are stored.
NVTE_TEST_ARTIFACTS_DIR = os.environ.get('NVTE_TEST_ARTIFACTS_DIR')
NVTE_TEST_ARTIFACTS_DIR = NVTE_TEST_ARTIFACTS_DIR or os.path.join(tempfile.gettempdir(), "./gen_onnx_models")


# The directory where this file is stored.
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))

# ScaledUpperTriangMaskedSoftmax is exported via ONNX::Trilu which was introduced in opset 14.
TRILU_OPSET = 14
# Opset used in the ONNX files generated by the tests.
OPSET = 17
assert OPSET >= TRILU_OPSET

# Shared library implementing custom FP8 Q/DQ operators for ONNX Runtime (ORT).
ORT_CUSTOM_OPS_LIB = os.path.join(TESTS_DIR, "./libcustom_ort_fp8_qdq_ops.so")

fp8_available, reason_for_no_fp8 = is_fp8_available()
skip_FP8 = pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)


@pytest.fixture()
def seed_default_rng():
    """Reseed the PRNG for test reproducibility"""
    torch.random.seed()


@pytest.fixture()
def set_max_seq_len(max_seq_len=128):
    """Set the maximum sequence length that can be used for attention masking"""
    os.environ["NVTE_ONNX_KVCACHE_MAX_SEQ_LEN"] = f"{max_seq_len}"


def create_fp8_recipe():
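    """Build the DelayedScaling FP8 recipe (E4M3) shared by the export and inference helpers."""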
    return recipe.DelayedScaling(margin=0, interval=1, fp8_format=recipe.Format.E4M3)


def do_export(
    model: torch.nn.Module,
    inp: torch.Tensor,
    fname: str,
    use_fp8: bool=True,
    opset: int=OPSET,
    input_names: List[str]=None,
    output_names: List[str]=None,
    dynamic_axes: dict=None
):
    """Export to ONNX"""
    fp8_recipe = create_fp8_recipe()
    input_names = input_names or ["input"]
    output_names = output_names or ["output"]

    with torch.inference_mode(), te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings():
        warnings.filterwarnings(
            action='ignore',
            category=torch.jit.TracerWarning,
            module=r'.*'
        )

        model.cuda().eval()
        os.makedirs(NVTE_TEST_ARTIFACTS_DIR, exist_ok=True)
        fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)

        inps = inp if isinstance(inp, list) or isinstance(inp, tuple) else (inp,)
        assert len(inps) == len(input_names)
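        # Inputs that are None do not become ONNX graph inputs during tracing, so drop their
        # names as well to keep input_names aligned with the exported graph.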
        inds_to_del = [i for i in range(len(inps)) if inps[i] is None]
        input_names = [input_names[i] for i in range(len(inps)) if i not in inds_to_del]

        with te.onnx_export(True):
            torch.onnx.export(
                model,
                inps,
                fname,
                verbose=True,
                dynamic_axes=dynamic_axes,
                opset_version=opset,
                input_names=input_names,
                output_names=output_names,
                do_constant_folding=True,
                operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH)


def to_numpy(tensor):
    return tensor.cpu().numpy()


def set_layer_scale(module: torch.nn.Module, scale: float, num_gemms: int):
    """Initialize the FP8 quantization scales in module"""
    NB_SCALES_PER_GEMM = 3  # One scale per: input, weights, and output GEMM tensors.
    nb_total_scales = num_gemms * NB_SCALES_PER_GEMM
    module.fp8_init(num_gemms)
    module.fp8_meta["scaling_fwd"].scale = torch.ones(
        nb_total_scales, dtype=torch.float32, device="cuda") / scale
    module.fp8_meta["scaling_fwd"].scale_inv = torch.ones(
        nb_total_scales, dtype=torch.float32, device="cuda") * scale


def te_infer(model: torch.nn.Module, inps: Union[Tuple[torch.Tensor], torch.Tensor], is_fp8: bool):
    """Transformer Engine forward propagation.

    Return results after copying to the CPU and converting to numpy.
    """
    fp8_recipe = create_fp8_recipe()
    with torch.inference_mode(), te.fp8_autocast(enabled=is_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings():
        te_outputs = model(*inps if isinstance(inps, tuple) else (inps,))
        if not isinstance(te_outputs, tuple):
            te_outputs = (te_outputs,)
        te_outputs_np = [to_numpy(te_output) for te_output in te_outputs]
        return te_outputs_np


def compare_outputs(onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname):
    """ Compare ORT and TE outputs."""
    assert len(onnx_outputs) == len(te_outputs)
    # Compare ORT and PyTorch outputs.
    for onnx_output, te_output in zip(onnx_outputs, te_outputs):
        # np.isclose: abs(a - b) <= (atol + rtol * abs(b))
        ac = ~np.isclose(onnx_output, te_output, atol=atol, rtol=rtol)
        mismatches = ac.nonzero()
        mismatched_ids = [loc for loc in zip(*mismatches)]
        if mismatched_ids:
            # Log some information in case of error.
            print("*" * 100)
            nb_errors = len(mismatched_ids)
            nb_vals = min(nb_errors, max_errors_printed)
            print(f"Detected {nb_errors} diverging values (output shape={onnx_output.shape})")
            print(f"Showing first {nb_vals} errors (ONNX -- TE):")
            abs_err = np.abs(onnx_output - te_output)
            errors = abs_err[mismatches]
            for loc in mismatched_ids[:nb_vals]:
                ref = te_output[loc]
                print(f"{onnx_output[loc]} -- {te_output[loc]} err={abs_err[loc]} > {atol + rtol * abs(ref)}")
            print(f"Max error: {np.max(errors)}")
            if nb_errors > allow_cnt_errors:
                raise ValueError(f"Output validation of {fname} failed with {nb_errors} errors")


def validate_result(
    fname: str,
    inps: Union[Tuple[torch.Tensor], torch.Tensor],
    model: torch.nn.Module,
    atol: float=1.e-8, # np.isclose default atol
    rtol: float=1.e-5, # np.isclose default rtol
    max_errors_printed: int=10,
    is_fp8: bool=False,
    allow_cnt_errors: int=0,
    input_names: List[str]=None,
    output_names: List[str]=None,
    te_outputs: List[torch.Tensor]=None,
):
    """Compare the outputs of a Transformer Engine (TE) module vs the outputs of its ONNX
    representation using ONNX Runtime (ORT) and ensure they are close.

    The purpose of the output comparison is to validate that TE models are converted to
    their correct ONNX representation by testing that TE and ORT outputs match within some
    small threshold (allowing for finite precision errors).

    Argument `allow_cnt_errors` reduces test failure noise due to spurious errors by ignoring a
    very small number (0-3) of outliers. This is fine to do because these outliers are due to
    small kernel implementation differences between TE and ORT and do not imply an incorrect ONNX
    representation (the tests assume both ORT and TE kernels are correct).

    Argument `te_outputs` can be used to provide pre-computed TE outputs.
    """

    def create_ort_session(fname: str, is_fp8: bool):
        """Create an ONNX Runtime session for validation."""
        def load_custom_ops(session_opts: ort.SessionOptions):
            """For FP8 validation with ORT we need to load our custom FP8 Q/DQ extension."""
            if not os.path.exists(ORT_CUSTOM_OPS_LIB):
                raise FileNotFoundError(f"Unable to find {ORT_CUSTOM_OPS_LIB}")
            session_opts.register_custom_ops_library(ORT_CUSTOM_OPS_LIB)
            print("registered custom FP8 Q/DQ ops!")

        kwargs = {}
        if is_fp8:
            sess_options = ort.SessionOptions()
            load_custom_ops(sess_options)
            kwargs["sess_options"] = sess_options

        s = ort.InferenceSession(fname, **kwargs)
        return s

    def create_ort_input_dict(session, inputs):
        inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
        input_names = [x.name for x in session.get_inputs()]
        inps = [to_numpy(x) for x in inputs if x is not None]
        inp_dict = dict(zip(input_names, inps))
        return inp_dict

    def serialize_inputs_outputs(fname, inputs, inputs_names, te_outputs, output_names):
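        """Optionally (when SAVE_TEST_IO is set) dump the test inputs and TE outputs as Polygraphy JSON files."""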
        if not SAVE_TEST_IO:
            return
        inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
        named_inputs = zip(inputs_names, inputs)
        input_data = [{k: to_numpy(v) for k, v in named_inputs if v is not None}]
        json_fname = fname[:-len(".onnx")] + "_inputs.json"
        save_json(input_data, json_fname, description="custom input data")

        if "bf16" in fname:
            return
        json_fname = fname[:-len(".onnx")] + "_output.json"
        named_outputs = zip(output_names, te_outputs)
        output_data = dict()
        for out_name, outp in named_outputs:
            if outp is not None:
                assert out_name not in output_data
                output_data[out_name] = outp
        custom_outputs = RunResults()
        custom_outputs.add([output_data], runner_name="custom_runner")
        custom_outputs.save(json_fname)

    input_names = input_names or ["input"]
    output_names = output_names or ["output"]

    # Run ORT session and TE model.
    fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
    if not te_outputs:
        te_outputs = te_infer(model, inps, is_fp8)
    ort_s = create_ort_session(fname, is_fp8)
    input_feed = create_ort_input_dict(ort_s, inps)
    onnx_outputs = ort_s.run(None, input_feed=input_feed)
    compare_outputs(onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname)
    serialize_inputs_outputs(fname, inps, input_names, te_outputs, output_names)


def create_meta(scale_factor: float, size: int=1):
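    """Create an FP8TensorMeta with `size` scale entries; scale and scale_inv are reciprocals."""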
    meta = tex.FP8TensorMeta()
    meta.amax_history = torch.zeros(1, size, dtype=torch.float32, device="cuda")
    meta.scale_inv = torch.ones(size, dtype=torch.float32, device="cuda") / scale_factor
    meta.scale = torch.ones(size, dtype=torch.float32, device="cuda") * scale_factor
    return meta


def dtype2str(dtype: torch.dtype):
    return {
        torch.float32: "_fp32",
        torch.float16: "_fp16",
        torch.bfloat16: "_bf16",
    }[dtype]


def as_te_type(dtype: torch.dtype):
    return {
        torch.float32: tex.DType.kFloat32,
        torch.float16: tex.DType.kFloat16,
        torch.bfloat16: tex.DType.kBFloat16,
    }[dtype]


def get_attn_mask_str(use_mask, attn_mask_type):
    # See FusedScaleMaskSoftmax::forward_fused_softmax for logic behind names.
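    # Illustrative mappings: (use_mask=False, "causal") -> "_causal-mask",
    # (use_mask=True, "padding") -> "_padding-mask", (use_mask=True, None) -> "_mask".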
    if attn_mask_type is None:
        return "_mask" if use_mask else "_no-mask"
    attn_mask_str = "_padding-no-mask"
    attn_mask_str = "_causal-mask" if attn_mask_type == "causal" else attn_mask_str
    attn_mask_str = "_padding-mask" if use_mask and attn_mask_type == "padding" else attn_mask_str
    return attn_mask_str


"""
Test cases begin here.
"""


@skip_FP8
@pytest.mark.parametrize("scale_factor", [1, 224])
@pytest.mark.parametrize(
    "precision,     atol", [
    [torch.float32, 1e-7],
    [torch.float16, 1e-7],
    [torch.bfloat16, 5e-3]
])
def test_export_cast_ops(seed_default_rng, scale_factor: float, atol: float, precision: torch.dtype):
    class TestFP8_QDQ(nn.Module):
        def __init__(self, fake_bf16_io):
            super().__init__()
            self.fp8_tensor = 0
            self.meta = create_meta(scale_factor)
            self.highprec_type = as_te_type(precision)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.fake_bf16_io = fake_bf16_io

        def forward(self, inp):
            ret = cast_to_fp8(
                inp,
                self.meta,
                self.fp8_tensor,
                self.fp8_type)

            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                self.highprec_type)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    fake_bf16_io = precision == torch.bfloat16
    inp = torch.randn(hidden_size, in_features, device="cuda",
        dtype=torch.float if fake_bf16_io else precision)
    high_prec_str = dtype2str(precision)
    fname = f"te.cast_fp8_{scale_factor}{high_prec_str}.onnx"
    model = TestFP8_QDQ(fake_bf16_io)

    do_export(model, inp, fname)
    validate_result(fname, inp, model, atol=atol, is_fp8=True)

@skip_FP8
@pytest.mark.parametrize("scale_factor", [448])
@pytest.mark.parametrize(
    "precision,     atol", [
    [torch.float32, 1e-5],
    [torch.float16, 1e-5],
    [torch.bfloat16, 5e-3]
])
def test_export_gelu_fp8(scale_factor: float, precision: torch.dtype, atol: float):
    class TestFP8_Gelu(nn.Module):
        def __init__(self, fake_bf16_io):
            super().__init__()
            self.fp8_tensor = 0
            self.meta = create_meta(scale_factor)
            self.highprec_type = as_te_type(precision)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.fake_bf16_io = fake_bf16_io

        def forward(self, inp):
            ret = fp8_gelu(
                inp,
                self.meta,
                self.fp8_tensor,
                self.fp8_type)
            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                self.highprec_type)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    fake_bf16_io = precision == torch.bfloat16
    inp = torch.randn(hidden_size, in_features, device="cuda",
        dtype=torch.float if fake_bf16_io else precision)
    high_prec_str = dtype2str(precision)
    fname = f"te.gelu_fp8_{scale_factor}{high_prec_str}.onnx"
    model = TestFP8_Gelu(fake_bf16_io)
    do_export(model, inp, fname)
    validate_result(fname, inp, model, rtol=0, atol=atol, is_fp8=True, allow_cnt_errors=2)


@pytest.mark.parametrize("scale_factors",
    [(224, 224,),
])
@pytest.mark.parametrize(
    "precision,     use_fp8, use_bias, use_gelu", [
    (torch.float32, False,   False,    False),
    (torch.float16, False,   False,    False),
    (torch.float32, False,   True,     False),
    (torch.float16, False,   True,     False),
    (torch.float32, False,   True,     True),
    (torch.float16, False,   True,     True),

    # For FP8 GEMM GeLU is not used.
    (torch.float32, True,    False,    False),
    (torch.float16, True,    False,    False),
    # When enabling bias we must use float16 or bfloat16 (because of kernel limitations)
    (torch.float16, True,    True,     False),
    (torch.bfloat16, True,   True,     False),
])
def test_export_gemm(
    seed_default_rng,
    precision, # Precision of inputs, weights, output and bias
    use_fp8,
    use_bias,
    use_gelu,
    scale_factors
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    class TestFP8_GEMM(nn.Module):
        def __init__(self, precision, use_bias, gelu, scale_factors):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision

            self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
            self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
            nb_inp_scales, nb_weight_scales = 1, out_features
            act_scale_factor, weight_scale_factor = scale_factors
            self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
            self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)

            bias_size = nb_weight_scales
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")

            self.inp_type = tex.DType.kFloat8E4M3
            self.weights_type = tex.DType.kFloat8E4M3
            self.outp_type = precision

        def forward(self, inp, weight):
            inp_fp8 = cast_to_fp8(
                inp,
                self.meta_inp,
                self.fp8_tensor_inp,
                self.inp_type)

            weight_fp8 = cast_to_fp8(
                weight,
                self.meta_weight,
                self.fp8_tensor_weight,
                self.weights_type)

            ret = fp8_gemm(
                weight_fp8,
                self.meta_weight.scale_inv,
                self.fp8_tensor_weight,
                self.inp_type,
                inp_fp8,
                self.meta_inp.scale_inv,
                self.fp8_tensor_inp,
                self.weights_type,
                self.outp_type,
                get_workspace(),
                bias=self.bias,
                use_bias=self.use_bias,
                use_split_accumulator=False)
            return ret

    class Test_GEMM(nn.Module):
        def __init__(self, precision, use_bias=False, gelu=False):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision
            bias_size = out_features
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")

        def forward(self, inp, weight):
            outp_type = self.precision

            # note: due to logic in lines 104:116 and L129 in cpp_extensions.py
            # it appears either bias OR gelu can be activated, not both
            ret, _, _ = gemm(
                weight,
                inp,
                outp_type,
                get_workspace(),

                # test bias
                bias=self.bias,
                use_bias=self.use_bias,

                # test gelu
                gelu=self.gelu,
                gelu_input=self.gelu_input,
                grad=False, # only True for backward pass
                accumulate=False,
            )
            return ret

    # If gelu is applied then bias must be added, as defined by TE kernel.
    if use_gelu: assert use_bias
    # Set dimensions (these are arbitrary).
    out_features = 128
    hidden_size = 256
    in_features = 64
    inp = torch.randn(hidden_size, in_features, dtype=precision, device="cuda")
    weight = torch.randn(out_features, in_features, dtype=precision, device="cuda")
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    gelu_str = "_gelu" if use_gelu else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.gemm{fp8_str}{bias_str}{gelu_str}{high_prec_str}.onnx"
    input_names = ['input', 'weight']
    if use_fp8:
        model = TestFP8_GEMM(precision, use_bias, use_gelu, scale_factors)
        do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
        if precision == torch.bfloat16:
            return
        validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2, is_fp8=True, input_names=input_names)
    else:
        model = Test_GEMM(precision, use_bias, use_gelu)
        do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
        validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2, input_names=input_names)


@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
def test_export_layernorm(
    seed_default_rng,
    use_fp8: bool,
    scale_factor: float,
    precision: torch.dtype,
    zero_centered_gamma: bool
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    inp_shape = [64, 32]

    class Test_Layernorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, dtype=precision, device="cuda")
            self.bias = torch.zeros(*normalized_shape, dtype=precision, device="cuda")
            self.eps = 1e-6 # An arbitrary small value

        def forward(self, inp):
            ret = texcpp.layernorm_fwd_inf(
                inp,
                self.weight,
                self.bias,
                self.eps,
                zero_centered_gamma)
            return ret

    class TestFP8_Layernorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, dtype=precision, device="cuda")
            self.bias = torch.zeros(*normalized_shape, dtype=precision, device="cuda")
            self.eps = 1e-6 # An arbitrary small value

            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3

        def forward(self, inp):
            ret = texcpp.layernorm_fwd_fp8_inf(
                inp,
                self.weight,
                self.bias,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                zero_centered_gamma)

            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                tex.DType.kFloat32 if precision == torch.float32 else tex.DType.kFloat16)
            return ret

    inp = torch.randn(*inp_shape, device="cuda", dtype=precision)
    model = TestFP8_Layernorm() if use_fp8 else Test_Layernorm()
    high_prec_str = dtype2str(precision)
    fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
    fname = f"te.layernorm{fp8_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, use_fp8=use_fp8)
    if precision not in (torch.bfloat16, ):
        validate_result(
            fname, inp, model, atol=1e-7, is_fp8=use_fp8, allow_cnt_errors=3)


@skip_FP8
@pytest.mark.parametrize("softmax_fn", [
    softmax_defs.ScaledUpperTriangMaskedSoftmax,
    softmax_defs.ScaledMaskedSoftmax,
    softmax_defs.ScaledSoftmax,
    te.softmax.FusedScaleMaskSoftmax,
])
# Softmax kernel only supports FP16 or BF16!
@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16])
def test_export_softmax(seed_default_rng, set_max_seq_len, softmax_fn, precision):
    class Test_Softmax(nn.Module):
        def __init__(self, softmax_fn, mask_inp=False):
            super().__init__()
            self.softmax_fn = softmax_fn
            self.scale = 8 # arbitrary value
            self.mask_inp = mask_inp
            self.fused_scaled_softmax = None
            if self.softmax_fn == te.softmax.FusedScaleMaskSoftmax:
                self.fused_scaled_softmax = te.softmax.FusedScaleMaskSoftmax(
                    attn_mask_type="causal",
                    mask_func=te.utils.attention_mask_func,
                    softmax_in_fp32=True,
                )

        def forward(self, inp, mask):
            if self.fused_scaled_softmax:
                ret = self.fused_scaled_softmax(inp, mask, self.scale)
            else:
                if self.mask_inp:
                    ret = self.softmax_fn.apply(inp, mask, self.scale)
                else:
                    ret = self.softmax_fn.apply(inp, self.scale)
            return ret

    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    mask = None
    input_names = ["input", "mask"]
    inp_shape = [hidden_size, in_features, in_features, in_features]
    if softmax_fn == softmax_defs.ScaledUpperTriangMaskedSoftmax:
        inp_shape = [hidden_size, in_features, in_features]
        kernel_str = "ScaledUpperTriangMaskedSoftmax"
        model = Test_Softmax(softmax_fn)
    elif softmax_fn == softmax_defs.ScaledMaskedSoftmax:
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(hidden_size, 1, in_features, in_features, device="cuda", dtype=precision)
        mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
        kernel_str = "ScaledMaskedSoftmax"
        model = Test_Softmax(softmax_fn, mask_inp=True)
    elif softmax_fn == softmax_defs.ScaledSoftmax:
        kernel_str = "ScaledSoftmax"
        model = Test_Softmax(softmax_fn)
    elif softmax_fn == te.softmax.FusedScaleMaskSoftmax:
        kernel_str = "TorchSoftmax"
        model = Test_Softmax(softmax_fn)
    input_tensor = torch.randn(*inp_shape, device="cuda")
    input_tensor = input_tensor.to(torch.bfloat16) if precision == torch.bfloat16 else input_tensor.half()
    high_prec_str = dtype2str(precision)
    fname = f"{kernel_str}{high_prec_str}.onnx"
    inp = (input_tensor, mask)
    do_export(model, inp, fname, input_names=input_names)
    if precision != torch.bfloat16:
        validate_result(fname, inp, model, atol=1e-3, input_names=input_names)


# Test dynamically generated softmax mask.
# Softmax kernel only supports FP16 or BF16!
@skip_FP8
@pytest.mark.parametrize("precision", [torch.float16])
def test_softmax_mask_fn(seed_default_rng, set_max_seq_len, precision):
    class Test_Softmax(nn.Module):
        def __init__(self, use_onnx_mask_fn: bool):
            super().__init__()
            self.scale = 1 # arbitrary value
            # Use NVTE_MASKED_SOFTMAX_FUSION to force TE to use forward_torch_softmax
            # even when is_in_onnx_export_mode()==False.
            os.environ["NVTE_MASKED_SOFTMAX_FUSION"] = "0"
            self.fused_scaled_softmax = te.softmax.FusedScaleMaskSoftmax(
                attn_mask_type="causal",
                mask_func=te.utils.attention_mask_func,
                softmax_in_fp32=True,
            )

        def forward(self, inp, mask):
            ret = self.fused_scaled_softmax(inp, mask, self.scale)
            return ret

    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    mask = None
    inp_shape = [hidden_size, in_features, in_features, in_features]
    input_tensor = torch.randn(*inp_shape, device="cuda")
    input_tensor = input_tensor.to(torch.bfloat16) if precision == torch.bfloat16 else input_tensor.half()
    inp = (input_tensor, mask)
    high_prec_str = dtype2str(precision)

    # Compare the outputs of TE when using the default softmax mask
    # to the TE outputs produced when using the ONNX-compatible causal mask.
    model = Test_Softmax(use_onnx_mask_fn=False)
    te_outputs_default_mask = te_infer(model, inp, is_fp8=True)
    with te.onnx_export(True):
        # ONNX export mode forces use of the ONNX-compatible causal mask.
        model_onnx_mask = Test_Softmax(use_onnx_mask_fn=True)
        te_outputs_onnx_mask = te_infer(model_onnx_mask, inp, is_fp8=True)
    compare_outputs(te_outputs_default_mask, te_outputs_onnx_mask,
        atol=0, rtol=0, max_errors_printed=10, allow_cnt_errors=0, fname="softmax masking")

    # Compare the outputs of TE when using the default softmax mask
    # to the ORT ONNX outputs produced when using the ONNX-compatible causal mask.
    input_names = ["input", "mask"]
    kernel_str = "FusedScaleMaskSoftmax"
    fname = f"{kernel_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, input_names=input_names)
    if precision != torch.bfloat16:
        validate_result(fname, inp, model_onnx_mask, atol=1e-3, input_names=input_names, te_outputs=te_outputs_default_mask)


@pytest.mark.parametrize("scale_factor", [1])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize(
    "precision,     use_bias",[
    (torch.float32, False),
    (torch.float32, True),
    (torch.float16, False),
    (torch.float16, True),
    # Todo: cannot configure BF16 when bias is disabled (ORT issue?)
    (torch.bfloat16, False),
    # Todo: cannot configure BF16 when bias is enabled (ORT issue?)
    # (torch.bfloat16, True),
])
def test_export_linear(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    precision: torch.dtype
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256

    class Test_Linear(nn.Module):
        def __init__(self,
                in_features,
                out_features,
                use_bias,
                return_bias,
                precision
            ):
            super().__init__()
            self.linear = te.Linear(
                in_features,
                out_features,
                bias=use_bias,
                return_bias=return_bias,
                params_dtype=precision
            )

        def forward(self, inp):
            ret = self.linear(inp)
            return ret

    inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.linear{fp8_str}{bias_str}{high_prec_str}.onnx"
    with te.fp8_autocast(enabled=use_fp8):
        model = Test_Linear(
            in_features,
            out_features,
            use_bias,
            return_bias,
            precision
        ).to(device='cuda')
        if use_fp8:
            set_layer_scale(model.linear, scale_factor, num_gemms=1)
        do_export(model, inp, fname, use_fp8)

        if precision in (torch.bfloat16, ):
            return
        if not use_fp8:
            validate_result(fname, inp, model, atol=1e-3)
        else:
            validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8)


@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "precision,     use_bias",[
    (torch.float32, False),
    (torch.float32, True),
    (torch.float16, True),
    (torch.float16, False),
])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
def test_export_layernorm_linear(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    return_layernorm_output: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256

    inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.layernorm_linear{fp8_str}{bias_str}{high_prec_str}.onnx"

    with te.fp8_autocast(enabled=use_fp8):
        model = te.LayerNormLinear(
            hidden_size,
            3 * hidden_size,
            bias=use_bias,
            return_bias=return_bias,
            return_layernorm_output=return_layernorm_output,
            params_dtype=precision,
            zero_centered_gamma=zero_centered_gamma,
        ).to(device='cuda')
        if use_fp8:
            set_layer_scale(model, scale_factor, num_gemms=1)
        do_export(model, inp, fname, use_fp8)
        if not use_fp8:
            validate_result(fname, inp, model, atol=1e-3)
        elif precision != torch.bfloat16:
            validate_result(fname, inp, model, atol=1e-6, is_fp8=use_fp8)


@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "precision,     use_bias",[
    (torch.float32, False),
    (torch.float32, True),
    (torch.float16, True),
    (torch.float16, False),
])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
def test_export_layernorm_mlp(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    return_layernorm_output: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256
    ffn_hidden_size = 256

    inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.layernorm_mlp{fp8_str}{bias_str}{high_prec_str}.onnx"
    with te.fp8_autocast(enabled=use_fp8):
        model = te.LayerNormMLP(
            hidden_size,
            ffn_hidden_size,
            bias=use_bias,
            return_bias=return_bias,
            return_layernorm_output=return_layernorm_output,
            params_dtype=precision,
            zero_centered_gamma=zero_centered_gamma,
        ).to(device='cuda')
        if use_fp8:
            set_layer_scale(model, scale_factor, num_gemms=2)
        do_export(model, inp, fname, use_fp8)
        if not use_fp8:
            validate_result(fname, inp, model, atol=1e-3)
        else:
            validate_result(fname, inp, model, atol=1e-6, is_fp8=use_fp8)

@skip_FP8
@pytest.mark.parametrize(
    "precision,     use_mask, attn_mask_type", [
    (torch.float32, False,    None),      # calls forward_torch_softmax
    (torch.float32, True,     None),      # calls forward_torch_softmax
    (torch.float16, False,    "causal"),  # calls ScaledUpperTriangMaskedSoftmax
    (torch.float16, True,     "padding"), # calls ScaledMaskedSoftmax
    (torch.float16, False,    "padding"), # calls ScaledSoftmax
])
def test_export_core_attention(
    seed_default_rng,
    set_max_seq_len,
    precision: torch.dtype,
    use_mask: bool,
    attn_mask_type: str,
):
    # Set dimensions (these are arbitrary).
    seq_len, batch_size, num_attention_heads, kv_channels = (64, 4, 1, 64)
    qkv_size = (seq_len, batch_size, num_attention_heads, kv_channels)

    query_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    key_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    value_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    input_names = ["query", "key", "value", "attention_mask"]
    attention_mask = None
    if use_mask:
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(qkv_size[1], qkv_size[2], qkv_size[0], qkv_size[0], device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
    inp = (query_layer, key_layer, value_layer, attention_mask)

    mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    high_prec_str = dtype2str(precision)
    fname = f"te.core_attention{mask_str}{high_prec_str}.onnx"

    if attn_mask_type is None:
        attn_mask_type = 'causal'
        input_names = ["query", "key", "value"]
        inp = (query_layer, key_layer, value_layer)
    model = te.attention.DotProductAttention(
        num_attention_heads=num_attention_heads,
        kv_channels=kv_channels,
        attention_dropout=0.5,
        attn_mask_type=attn_mask_type,
    ).to(device='cuda')
    do_export(model,
            inp,
            fname,
            input_names=input_names,
            use_fp8=True)
    validate_result(fname, inp, model, atol=1e-2, input_names=input_names)


test_configs_multihead_attention = [
    #"use_mask, attn_mask_type"
    (False,    "causal"),  # calls ScaledUpperTriangMaskedSoftmax
    (True,     "padding"), # calls ScaledMaskedSoftmax
    (False,    "padding"), # calls ScaledSoftmax
]
test_configs_attention_type = [
    #"input_layernorm, attention_type, fuse_qkv_params"
    (True,             "self",         True),
    (False,            "self",         True),
    (True,             "self",         False),
    (False,            "self",         False),
    (True,             "cross",        True),
    (False,            "cross",        True),
    (True,             "cross",        False),
    (False,            "cross",        False),
]
@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("precision", [torch.float32, torch.float16])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize("input_layernorm, attention_type, fuse_qkv_params", test_configs_attention_type)
def test_export_multihead_attention(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    use_mask: bool,
    attn_mask_type: str,
    precision: torch.dtype,
    return_layernorm_output: bool,
    input_layernorm: bool,
    attention_type: str,
    fuse_qkv_params: bool
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    hidden_size = 256
    sequence_length = 128
    batch_size = 4
    num_attention_heads = 32
    kv_channels = 8
    attention_dropout = 0.1
    layernorm_epsilon = 1e-5
    init_method = output_layer_init_method = get_default_init_method()
    attention_args = (
        hidden_size,
        num_attention_heads,
        kv_channels,
        attention_dropout,
        layernorm_epsilon,
        init_method,
        output_layer_init_method,
    )

    hidden_states_context = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    attention_mask = None
    if use_mask and attn_mask_type != "causal":
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)

    encoder_output = None

    if attention_type == "cross":
        encoder_output = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")

    fp8_str = "_fp8" if use_fp8 else ""
    dtype_str = dtype2str(precision)
    attn_type_str = "_self-attention" if attention_type == "self" else "_cross-attention"
    fuse_qkv_str = "_fused-qkv" if fuse_qkv_params else ""
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    input_ln_str = "_input-ln" if input_layernorm else ""
    fname = f"te.multihead_attention{fp8_str}{attn_mask_str}{attn_type_str}{input_ln_str}{fuse_qkv_str}{dtype_str}.onnx"

    model = te.attention.MultiHeadAttention(
        *attention_args,
        attn_mask_type=attn_mask_type,
        params_dtype=precision,
        return_layernorm_output=return_layernorm_output,
        input_layernorm=input_layernorm,
        attention_type=attention_type,
        fuse_qkv_params=fuse_qkv_params,
    ).to(device='cuda')

    inp_context = (hidden_states_context, attention_mask, encoder_output)
    input_names = ["hidden_states", "attention_mask", "encoder_output"]
    output_names=["attention_output", "attention_bias"]
    do_export(model, inp_context, fname, use_fp8, input_names=input_names, output_names=output_names,
        dynamic_axes={"hidden_states": {0: "seq", 1:"bs"},
                      "attention_output": {0: "seq", 1:"bs"}})
    if not use_fp8:
        validate_result(fname, inp_context, model, atol=1e-3, input_names=input_names, output_names=output_names)
    else:
        validate_result(fname, inp_context, model, atol=1e-2, is_fp8=use_fp8,
            input_names=input_names, output_names=output_names, allow_cnt_errors=3)

    # In GPT generative phase (inference) the input sequence is smaller than the maximum
    # allowed sequence length and we want to test this condition.
    # Pretend that we're in generative phase when it makes sense (causal mask and self-attention).
    is_generative_phase = (attn_mask_type == "causal" and attention_type == "self")
    if is_generative_phase:
        seq_len_offset = 8
        hidden_states_generative = torch.randn(sequence_length-seq_len_offset, batch_size, hidden_size, dtype=precision, device="cuda")
        inp_generative = (hidden_states_generative, attention_mask, encoder_output)
        if not use_fp8:
            validate_result(fname, inp_generative, model, atol=1e-3, input_names=input_names, output_names=output_names)
        else:
            validate_result(fname, inp_generative, model, atol=1e-2, is_fp8=use_fp8,
                input_names=input_names, output_names=output_names, allow_cnt_errors=3)



@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("output_layernorm", [
    #True, # TO DO: handle this
    False
])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16])
@pytest.mark.parametrize("fuse_qkv_params", [False, True])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
def test_export_transformer_layer(
1119
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    use_mask: bool,
    attn_mask_type: str,
    output_layernorm: bool,
    precision: torch.dtype,
    fuse_qkv_params: bool,
    zero_centered_gamma: bool
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Layer configuration
    hidden_size = 64
    sequence_length = 128
    batch_size = 1
    ffn_hidden_size = 256
    num_attention_heads = 4

    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    input_names = ["input", "attention_mask"]
    attention_mask = None
    if use_mask and attn_mask_type != "causal":
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
    inp = (input_tensor, attention_mask)

    fp8_str = "_fp8" if use_fp8 else ""
    fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
    high_prec_str = dtype2str(precision)
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    fname = f"te.transformer_layer{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}.onnx"

    model = te.TransformerLayer(
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        self_attn_mask_type=attn_mask_type,
        output_layernorm=output_layernorm,
        params_dtype=precision,
        fuse_qkv_params=fuse_qkv_params,
        zero_centered_gamma=zero_centered_gamma).to(device='cuda')
    do_export(model, inp, fname, use_fp8, input_names=input_names)
    if not use_fp8:
        validate_result(fname, inp, model, atol=1e-3, input_names=input_names)
    else:
        validate_result(fname, inp, model, atol=5e-1, is_fp8=use_fp8, input_names=input_names)


@pytest.mark.parametrize("use_fp8", [True])
@pytest.mark.parametrize("ln_scale_factor", [448*2])
@pytest.mark.parametrize("gemm_scale_factors", [(224, 224,),])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
def test_export_gemm_layernorm(
    seed_default_rng,
    use_fp8: bool,
    ln_scale_factor: float,
    gemm_scale_factors: Tuple[float, float],
    precision: torch.dtype,
    zero_centered_gamma: bool
):
    """This is a regression test for testing that all LN inputs have the same type.

    The test sets up GEMM with FP32 output which feeds into an LN that is configured
    with FP16 or BF16 weights and bias.
    """

    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    class TestFP8_GemmLayernorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, dtype=precision, device="cuda")
            self.bias = torch.zeros(*normalized_shape, dtype=precision, device="cuda")
            self.eps = 1e-6 # An arbitrary small value

            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(ln_scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.gemm = TestFP8_GEMM(
                precision, use_bias=False, gelu=False, scale_factors=gemm_scale_factors)

        def forward(self, inp, weight):
            x = self.gemm(inp, weight)
            x = texcpp.layernorm_fwd_fp8_inf(
                x,
                self.weight,
                self.bias,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                zero_centered_gamma)

            x = cast_from_fp8(
                x,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                tex.DType.kFloat32 if precision == torch.float32 else tex.DType.kFloat16)
            return x

    out_features = 128
    hidden_size = 128
    in_features = 128
    class TestFP8_GEMM(nn.Module):
        def __init__(self, precision, use_bias, gelu, scale_factors):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision

            self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
            self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
            nb_inp_scales, nb_weight_scales = 1, out_features
            act_scale_factor, weight_scale_factor = scale_factors
            self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
            self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)

            bias_size = nb_weight_scales
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")

            self.inp_type = tex.DType.kFloat8E4M3
            self.weights_type = tex.DType.kFloat8E4M3
            self.outp_type = precision

        def forward(self, inp, weight):
            inp_fp8 = cast_to_fp8(
                inp,
                self.meta_inp,
                self.fp8_tensor_inp,
                self.inp_type)

            weight_fp8 = cast_to_fp8(
                weight,
                self.meta_weight,
                self.fp8_tensor_weight,
                self.weights_type)

            ret = fp8_gemm(
                weight_fp8,
                self.meta_weight.scale_inv,
                self.fp8_tensor_weight,
                self.inp_type,
                inp_fp8,
                self.meta_inp.scale_inv,
                self.fp8_tensor_inp,
                self.weights_type,
                self.outp_type,
                get_workspace(),
                bias=self.bias,
                use_bias=self.use_bias,
                use_split_accumulator=False)
            return ret

    inp = torch.randn(hidden_size, in_features, dtype=precision, device="cuda")
    weight = torch.randn(out_features, in_features, dtype=precision, device="cuda")
    model = TestFP8_GemmLayernorm()
    high_prec_str = dtype2str(precision)
    fp8_str = f"_fp8" if use_fp8 else ""
    fname = f"te.gemm_layernorm{fp8_str}{high_prec_str}.onnx"
    input_names = ['input', 'weight']
    do_export(model, (inp, weight), fname, use_fp8=use_fp8, input_names=input_names)
    if precision not in (torch.bfloat16, ):
        validate_result(
            fname, (inp, weight), model, atol=5e-2, is_fp8=use_fp8, allow_cnt_errors=2, input_names=input_names)


@skip_FP8
@pytest.mark.parametrize("use_fp8", [True, False])
@pytest.mark.parametrize("precision", [torch.float16])
@pytest.mark.parametrize("zero_centered_gamma", [True])
def test_export_gpt_generation(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool
):
    """Test that the ONNX model can correctly handle inputs with different shapes and that
    the attention mask is adjusted on-the-fly to different sequence lengths.
    """

    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Layer configuration
    hidden_size = 64
    sequence_length = 128
    batch_size = 1
    ffn_hidden_size = 256
    num_attention_heads = 4
    attention_mask = None
    use_mask = True
    attn_mask_type = "causal"
    fuse_qkv_params = True
    output_layernorm = False

    fp8_str = "_fp8" if use_fp8 else ""
    fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
    high_prec_str = dtype2str(precision)
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    fname = f"te.transformer_layer_generative{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}.onnx"

    model = te.TransformerLayer(
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        self_attn_mask_type=attn_mask_type,
        output_layernorm=output_layernorm,
        params_dtype=precision,
        fuse_qkv_params=fuse_qkv_params,
        zero_centered_gamma=zero_centered_gamma).to(device='cuda')

    # "Context phase": use full input sequence length
    input_names = ["input"]
    output_names = ["output"]
    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    inp = (input_tensor,)
    do_export(model, inp, fname, use_fp8,
        input_names=input_names, output_names=output_names,
        dynamic_axes={"input": {0: "seq", 1:"bs"},
                      "output": {0: "seq", 1:"bs"}, })
    validate_result(fname, inp, model, atol=5e-3, is_fp8=use_fp8, input_names=input_names)

    # "Generative phase": use a single input (sequence len=1). For FP8 we need to pad the sequence to mult of 8.
    sequence_length = 1 if not use_fp8 else 8
    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    inp = (input_tensor, attention_mask)
    validate_result(fname, inp, model, atol=5e-3, is_fp8=use_fp8, input_names=input_names)


@pytest.mark.parametrize("enabled", [True, False])
def test_export_ctx_manager(enabled):
    assert is_in_onnx_export_mode() == False
    with te.onnx_export(enabled):
        assert is_in_onnx_export_mode() == enabled
    assert is_in_onnx_export_mode() == False