# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""
Tensor quantization classes for TE/JAX.

This module provides classes and utilities for quantizing tensors in JAX.
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from functools import partial
from typing import Union, Optional, Tuple
import warnings

import jax
import jax.numpy as jnp
from jax.tree_util import register_pytree_node_class
from transformer_engine_jax import QuantizeLayout
from transformer_engine.common import recipe

from .scaling_modes import ScalingMode
from .hadamard import apply_rht, should_use_rht
from .tensor import (
    ScaledTensor,
    ScaledTensor1x,
    ScaledTensor2x,
    ScaledTensorFactory,
    NoScaleTensor,
)
from .helper import (
    get_quantize_config,
    get_quantize_config_with_recipe,
    AmaxComputeAlgo,
    TensorSource,
)
from .device_utils import is_fp8_gemm_with_all_layouts_supported

__all__ = [
    "QuantizeLayout",
    "Quantizer",
    "QuantizerSet",
    "CurrentScaleQuantizer",
    "DelayedScaleQuantizer",
    "BlockScaleQuantizer",
    "GroupedQuantizer",
    "QuantizerFactory",
    "noop_quantizer_set",
    "compute_scale_from_amax",
]


def compute_scale_from_amax(
    amax: jnp.ndarray, q_dtype: jnp.dtype, scale: Optional[jnp.ndarray] = None
) -> jnp.ndarray:
    """Compute scale from amax value.

    Args:
        amax: Maximum absolute value of the tensor
        q_dtype: Quantization data type

    Returns:
        Scale value
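
    Example (illustrative, assuming the default quantize-config MARGIN of 0):
        >>> # 448.0 (E4M3 max) / 4.0 = 112.0
        >>> compute_scale_from_amax(jnp.array([4.0]), jnp.float8_e4m3fn)
        Array([112.], dtype=float32)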
    """
    fp8_max = jnp.astype(jnp.finfo(q_dtype).max, jnp.float32)
    if scale is None:
        scale = jnp.ones((1,))
    sf = (fp8_max / amax) / (2 ** get_quantize_config().MARGIN)
    sf = jnp.where(amax > 0.0, sf, scale)
    sf = jnp.where(jnp.isfinite(amax), sf, scale)
    assert sf.shape == (1,)
    return sf


@register_pytree_node_class
@dataclass
class Quantizer(ABC):
    """Base class for quantizers.

    This abstract class defines the interface for tensor quantization, providing
    methods for quantization and scale management.

    Attributes:
        q_dtype: The data type for quantized values
        scaling_mode: The scaling mode to use for quantization
        q_layout: The quantization axis (row-wise, column-wise, or both)
        data_layout: Two-character string giving the rowwise/colwise data layouts
    """

    q_dtype: jnp.dtype
    scaling_mode: ScalingMode
    q_layout: QuantizeLayout
    data_layout: str

    def tree_flatten(self):
        """Flatten the quantizer for JAX tree operations.

        Returns:
            Tuple of (children, aux_data) for tree operations
        """
        children = ()
        aux_data = (self.q_dtype, self.scaling_mode, self.q_layout, self.data_layout)
        return (children, aux_data)

    @classmethod
    def tree_unflatten(cls, aux_data, children):
        """Reconstruct a quantizer from its flattened representation.

        Args:
            aux_data: Auxiliary data containing quantizer parameters
            children: Unused children data

        Returns:
            A reconstructed Quantizer instance
        """
        return cls(*aux_data, *children)

    def update(self, *args, **kwargs):
        """Update quantizer state (no-op in base class)."""
        del args, kwargs

    def is_2x2x(self) -> bool:
        """Check if quantizer uses both row-wise and column-wise quantization.

        Returns:
            True if using both row-wise and column-wise quantization
        """
        return self.q_layout == QuantizeLayout.ROWWISE_COLWISE

    def get_data_layout(self) -> str:
        """Get the data layout string.

        Returns:
            Data layout in string format

        Raises:
            ValueError: If quantization axis is invalid
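
        Example (illustrative): with ``data_layout == "NT"``, ROWWISE yields
        ``"N"``, COLWISE yields ``"T"``, and ROWWISE_COLWISE yields ``"NT"``.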
        """
        if self.q_layout == QuantizeLayout.ROWWISE_COLWISE:
            return self.data_layout
        if self.q_layout == QuantizeLayout.ROWWISE:
            return self.data_layout[0]
        if self.q_layout == QuantizeLayout.COLWISE:
            return self.data_layout[1]
        raise ValueError(f"Invalid q_layout: {self.q_layout}")

    @abstractmethod
    def _quantize_func(self, x, is_colwise=False, dq_dtype=None, flatten_axis=-1) -> ScaledTensor1x:
        """Core quantization function to be implemented by subclasses.

        Args:
            x: Input tensor to quantize
            is_colwise: Whether to use column-wise quantization
            dq_dtype: Data type for dequantized values, default is x.dtype
            flatten_axis: The quantization axis for the tensor

        Returns:
            A ScaledTensor1x containing the quantized data
        """

    def quantize(
        self, x, is_rowwise=None, is_colwise=None, dq_dtype=None, flatten_axis=-1, **kwargs
    ) -> ScaledTensor:
        """Quantize a tensor using the internal _quantize_func().

        Args:
            x: Input tensor to quantize
            is_rowwise: Whether to use row-wise quantization
            is_colwise: Whether to use column-wise quantization
            dq_dtype: Data type for dequantized values
            flatten_axis: The quantization axis for the tensor

        Returns:
            A ScaledTensor1x or ScaledTensor2x containing the quantized data
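
        Example (illustrative sketch, using a concrete subclass):
            >>> # with q_layout == QuantizeLayout.ROWWISE_COLWISE
            >>> st2x = quantizer.quantize(x)  # -> ScaledTensor2x
            >>> # with q_layout == QuantizeLayout.ROWWISE
            >>> st1x = quantizer.quantize(x)  # -> ScaledTensor1x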
        """
        del kwargs

        is_rowwise = (
            is_rowwise
            if is_rowwise is not None
            else (self.q_layout == QuantizeLayout.ROWWISE or self.is_2x2x())
        )
        is_colwise = (
            is_colwise
            if is_colwise is not None
            else (self.q_layout == QuantizeLayout.COLWISE or self.is_2x2x())
        )

        if (is_rowwise and is_colwise) or self.is_2x2x():
            rowwise_tensor = self._quantize_func(x, dq_dtype=dq_dtype, flatten_axis=flatten_axis)
            colwise_tensor = self._quantize_func(
                x, is_colwise=True, dq_dtype=dq_dtype, flatten_axis=flatten_axis
            )
            return ScaledTensor2x(rowwise_tensor, colwise_tensor)

        if is_colwise:
            return self._quantize_func(
                x, is_colwise=True, dq_dtype=dq_dtype, flatten_axis=flatten_axis
            )

        return self._quantize_func(x, dq_dtype=dq_dtype, flatten_axis=flatten_axis)

    def get_scale_shapes(self, data_shape, is_padded=True, flatten_axis=-1, **kwargs):
        """Get shapes for scale tensors.

        Args:
            data_shape: Shape of the input tensor
            is_padded: Whether to use padded shapes
            flatten_axis: The quantization axis for the tensor

        Returns:
            Tuple of (rowwise_scale_shape, colwise_scale_shape)
        """
        del kwargs
        return self.scaling_mode.get_scale_shape_2x(data_shape, is_padded, flatten_axis)

    def get_scale_dtype(self):
        """Get the data type for scale tensors.

        Returns:
            The data type for scale tensors
        """
        return self.scaling_mode.get_scale_dtype()


@register_pytree_node_class
@dataclass
class CurrentScaleQuantizer(Quantizer):
    """Quantizer implementation using current scaling.
227

228
    This quantizer uses current scaling mode with float32 scales
229
230
231

    Attributes:
        scaling_mode: Set to NVTE_DELAYED_TENSOR_SCALING
232
        q_layout: Quantization axis (default: ROWWISE_COLWISE)
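
    Example (illustrative sketch, assumes an FP8-capable JAX build):
        >>> quantizer = CurrentScaleQuantizer(q_dtype=jnp.float8_e4m3fn)
        >>> st = quantizer.quantize(jnp.ones((16, 32), jnp.bfloat16))
        >>> st.get_rowwise_tensor().scale_inv.shape  # one scale for the tensor
        (1,)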
    """

    scaling_mode: ScalingMode = ScalingMode.CURRENT_TENSOR_SCALING
    q_layout: QuantizeLayout = QuantizeLayout.ROWWISE_COLWISE
    data_layout: str = "NT"

    def _quantize_func(
        self,
        x: Union[jnp.ndarray, NoScaleTensor],
        is_colwise=False,
        dq_dtype=None,
        flatten_axis=-1,
    ) -> ScaledTensor1x:
        """Quantize function helper for delayed scaling FP8.

        Args:
            x: Input tensor to quantize
            is_colwise: Whether to use column-wise quantization
            dq_dtype: Data type for dequantized values

        Returns:
            A ScaledTensor1x containing the quantized data
        """
        if isinstance(x, jnp.ndarray):
            x = NoScaleTensor(data=x, amax=None)

        dq_dtype = dq_dtype if dq_dtype is not None else x.data.dtype

        compute_dtype = jnp.float32
        dtype_max = (jnp.finfo(self.q_dtype).max).astype(compute_dtype)
        amax = x.amax if x.amax is not None else jnp.max(jnp.abs(x.data)).reshape((1,))
        scale = (dtype_max / amax) / (2 ** get_quantize_config().MARGIN)
        scaled_x = x.data.astype(compute_dtype) * scale

        clipped_scaled_x = jnp.clip(scaled_x, -dtype_max, dtype_max).astype(self.q_dtype)
        scale_inv = 1.0 / scale
        return ScaledTensorFactory.create_1x(
            data=clipped_scaled_x,
            scale_inv=scale_inv,
            scaling_mode=self.scaling_mode,
            dq_dtype=dq_dtype,
            flatten_axis=flatten_axis,
        )

    def quantize(
        self, x, is_rowwise: bool = None, is_colwise: bool = None, dq_dtype=None, flatten_axis=-1
    ):
        """Quantize a tensor using the internal _quantize_func().

        Args:
            x: Input tensor to quantize
            is_rowwise: Whether to use row-wise quantization
            is_colwise: Whether to use column-wise quantization
            dq_dtype: Data type for dequantized values
            flatten_axis: The quantization axis for the tensor

        Returns:
            A ScaledTensor1x or ScaledTensor2x containing the quantized data
        """
        if isinstance(x, jnp.ndarray):
            x = NoScaleTensor(data=x, amax=None)

        dq_dtype = dq_dtype if dq_dtype is not None else x.data.dtype
        if flatten_axis < 0:
            flatten_axis += x.ndim
        assert 0 < flatten_axis < x.ndim, "flatten_axis is out of bounds!"

        is_rowwise = (
            is_rowwise
            if is_rowwise is not None
            else (self.q_layout == QuantizeLayout.ROWWISE or self.is_2x2x())
        )
        is_colwise = (
            is_colwise
            if is_colwise is not None
            else (self.q_layout == QuantizeLayout.COLWISE or self.is_2x2x())
        )

        rowwise_tensor = self._quantize_func(x, dq_dtype=dq_dtype, flatten_axis=flatten_axis)
        colwise_tensor = None
        if is_colwise:
            colwise_tensor = ScaledTensorFactory.create_1x(
                data=jnp.transpose(
                    rowwise_tensor.data, (*range(flatten_axis, x.ndim), *range(flatten_axis))
                ),
                scale_inv=rowwise_tensor.scale_inv,
                scaling_mode=self.scaling_mode,
                dq_dtype=dq_dtype,
                is_colwise=True,
                data_layout="T",
                flatten_axis=flatten_axis,
            )

        if is_colwise and is_rowwise:
            return ScaledTensor2x(rowwise_tensor, colwise_tensor)
        if is_colwise:
            return colwise_tensor
        return rowwise_tensor


@register_pytree_node_class
@dataclass
class DelayedScaleQuantizer(CurrentScaleQuantizer):
    """Quantizer implementation using delayed scaling.

    This quantizer uses delayed scaling mode with float32 scales and maintains
    a history of maximum absolute values for dynamic scaling.

    Attributes:
        scaling_mode: Set to DELAYED_TENSOR_SCALING
        q_layout: Quantization axis (default: ROWWISE_COLWISE)
        scale: Current scaling factor
        amax_history: History of maximum absolute values
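
    Example (illustrative sketch): each quantize() call records the tensor's
    amax into the history and refreshes the scale used by later calls:
        >>> quantizer = DelayedScaleQuantizer(q_dtype=jnp.float8_e4m3fn)
        >>> _ = quantizer.quantize(x)  # records amax, then updates quantizer.scale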
    """

    scaling_mode: ScalingMode = ScalingMode.DELAYED_TENSOR_SCALING
    q_layout: QuantizeLayout = QuantizeLayout.ROWWISE_COLWISE

    scale: jnp.ndarray = field(default_factory=lambda: jnp.ones((1,), jnp.float32))
    amax_history: jnp.ndarray = field(
        default_factory=lambda: jnp.zeros((get_quantize_config().AMAX_HISTORY_LEN,), jnp.float32)
    )

    def tree_flatten(self):
        """Flatten the quantizer for JAX tree operations.

        Returns:
            Tuple of (children, aux_data) for tree operations
        """
        children = (self.scale, self.amax_history)
        aux_data = (self.q_dtype, self.scaling_mode, self.q_layout, self.data_layout)
        return (children, aux_data)

    def _quantize_func(
        self, x: jnp.ndarray, is_colwise=False, dq_dtype=None, flatten_axis=-1
    ) -> ScaledTensor1x:
        """Quantize function helper for delayed scaling FP8.

        Args:
            x: Input tensor to quantize
            is_colwise: Whether to use column-wise quantization
            dq_dtype: Data type for dequantized values
            flatten_axis: The quantization axis for the tensor

        Returns:
            A ScaledTensor1x containing the quantized data
        """
        if isinstance(x, jnp.ndarray):
            x = NoScaleTensor(data=x, amax=None)

        dq_dtype = dq_dtype if dq_dtype is not None else x.data.dtype

        compute_dtype = jnp.float32
        dtype_max = (jnp.finfo(self.q_dtype).max).astype(compute_dtype)
        scaled_x = x.data.astype(compute_dtype) * self.scale

        # quantize() in the old dot.py did it this way; kept here for future debugging:
        # compute_dtype = x.dtype
        # dtype_max = (jnp.finfo(self.q_dtype).max).astype(compute_dtype)
        # scaled_x = x * self.scale.astype(compute_dtype)

        clipped_scaled_x = jnp.clip(scaled_x, -dtype_max, dtype_max).astype(self.q_dtype)
        scale_inv = 1.0 / self.scale
        amax = x.amax if x.amax is not None else jnp.max(jnp.abs(x.data)).reshape((1,))
        # Note: this amax update runs only once per quantize() call, because the
        # quantize() implementation inherited from CurrentScaleQuantizer calls
        # _quantize_func once and derives the colwise output by transposing the
        # result, so update() is never invoked twice for 2x2x quantization.
        self.update(amax)
        return ScaledTensorFactory.create_1x(
            data=clipped_scaled_x,
            scale_inv=scale_inv,
            scaling_mode=self.scaling_mode,
            dq_dtype=dq_dtype,
            flatten_axis=flatten_axis,
        )

    @staticmethod
    @jax.jit
    def _update_amax_history(amax_history, new_amax):
        """Update AMAX history with new maximum value.

        Args:
            amax_history: Current AMAX history
            new_amax: New maximum value to add

        Returns:
            Updated AMAX history
        """
        amax_history = amax_history.at[0].set(new_amax[0])
        return amax_history

    @staticmethod
    @partial(jax.jit, static_argnums=(2,))
    def _compute_scale(amax_history, scale, q_dtype):
        """Compute new scale based on AMAX history.

        Args:
            amax_history: History of maximum absolute values
            scale: Current scale
            q_dtype: Quantization data type

        Returns:
            Updated scale value
        """
        # Compute the amax for the current scale from the history
        if get_quantize_config().AMAX_COMPUTE_ALGO is AmaxComputeAlgo.MAX:
            amax = jnp.max(amax_history, axis=-1, keepdims=True)
        else:
            amax = amax_history[0:1]

        return compute_scale_from_amax(amax, q_dtype, scale=scale)

    @staticmethod
    @jax.jit
    def _roll_and_reset_amax_history(amax_history):
        """Roll AMAX history and reset first element.

        Args:
            amax_history: Current AMAX history

        Returns:
            Updated AMAX history
        """
        updated_amax_history = jnp.roll(amax_history, -1, -1)
        amax_history = updated_amax_history.at[0].set(0.0)
        return amax_history

    def update(self, new_amax: jnp.ndarray):
        """Update AMAX history and compute new scale.

        Args:
            new_amax: New maximum absolute value to add to history
        """
        # Record the new amax in slot 0 of the history
        amax_history = self._update_amax_history(self.amax_history, new_amax)
        # Derive the new scale from the updated history
        self.scale = self._compute_scale(amax_history, self.scale, self.q_dtype)
        # Advance the history window and clear the slot for the next iteration
        self.amax_history = self._roll_and_reset_amax_history(amax_history)


@register_pytree_node_class
@dataclass
class BlockScaleQuantizer(Quantizer):
    """Quantizer implementation using block-based scaling.

    This quantizer uses MXFP8 1D block scaling with E8M0 block scales and
    block-based quantization for improved efficiency.

    Attributes:
        scaling_mode: Set to MXFP8_1D_SCALING
        q_layout: Quantization axis (default: ROWWISE_COLWISE)
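
    Example (illustrative sketch, assuming 1x32 MXFP8 blocks):
        >>> quantizer = BlockScaleQuantizer(q_dtype=jnp.float8_e4m3fn)
        >>> st = quantizer.quantize(jnp.ones((128, 256), jnp.bfloat16))
        >>> # rowwise scales: one E8M0 scale per 32-element block in the last axis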
    """

    scaling_mode: ScalingMode = ScalingMode.MXFP8_1D_SCALING
    q_layout: QuantizeLayout = QuantizeLayout.ROWWISE_COLWISE
    data_layout: str = "NN"

    def _quantize_func(self, x, is_colwise=False, dq_dtype=None, flatten_axis=-1) -> ScaledTensor1x:
        """Quantize function helper for block scaling FP8.

        Args:
            x: Input tensor to quantize
            is_colwise: Whether to use column-wise quantization
            dq_dtype: Data type for dequantized values
            flatten_axis: The quantization axis for the tensor

        Returns:
            A ScaledTensor1x containing the quantized data
        """
        if isinstance(x, NoScaleTensor):
            # MXFP8 block scaling has no use for a global amax, so simply extract
            # the jnp.ndarray data tensor from the NoScaleTensor x.
            x = x.data

        # TODO(Phuong): use quantize_func from JAX
        if flatten_axis < 0:
            flatten_axis = x.ndim + flatten_axis
        assert (
            0 <= flatten_axis < x.ndim
        ), f"Invalid flatten_axis: {flatten_axis} for tensor of shape {x.shape}"

        dq_dtype = dq_dtype if dq_dtype is not None else x.dtype
        x_shape = x.shape
        scale_shape = self.scaling_mode.get_scale_shape(
            x_shape, is_colwise=is_colwise, is_padded=False, flatten_axis=flatten_axis
        )
        scale_dtype = self.scaling_mode.get_scale_dtype()
        x = x.reshape(
            *x_shape[: flatten_axis - 1],
            scale_shape[flatten_axis - 1],
            int(x_shape[flatten_axis - 1] / scale_shape[flatten_axis - 1]),
            *x_shape[flatten_axis:-1],
            scale_shape[-1],
            int(x_shape[-1] / scale_shape[-1]),
        )
        # After the reshape, the per-block dims sit at flatten_axis and -1
        amax = jnp.max(jnp.abs(x), axis=(flatten_axis, -1), keepdims=True)
        MAX = jnp.finfo(self.q_dtype).max.astype(jnp.float32)
        scales = amax.astype(jnp.float32) / MAX

        scales_q = self._cast_to_e8m0_with_rounding_up(scales)
        scaled_x = x / self._e8m0_to_dtype(scales_q, jnp.float32)

        clipped_x = jnp.clip(scaled_x, -MAX, MAX)
        x_q = clipped_x.astype(self.q_dtype).reshape(x_shape)
        scales_q = scales_q.reshape(scale_shape).view(scale_dtype)

        return ScaledTensorFactory.create_1x(
            x_q,
            scales_q,
            scaling_mode=self.scaling_mode,
            is_colwise=is_colwise,
            dq_dtype=dq_dtype,
            flatten_axis=flatten_axis,
        )

    def _cast_to_e8m0_with_rounding_up(self, scales):
        """Cast scales to E8M0 format with rounding up.

        Args:
            scales: Input scales to convert

        Returns:
            Scales in E8M0 format
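
        Example (illustrative): 3.0 = 1.5 * 2**1 has a non-zero mantissa, so its
        biased exponent 128 is rounded up to 129, i.e. the stored scale behaves
        as 2**2 = 4.0.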
        """
        temp = scales.astype(jnp.float32).view(jnp.uint32)
        exp = temp >> 23
        mant = temp & 0x7FFFFF
        is_ru = jnp.logical_and(
            jnp.logical_and((mant > 0), (exp != 0xFE)),
            ~jnp.logical_and((exp == 0), (mant <= 0x400000)),
        )
        exp = jnp.where(is_ru, exp + 1, exp)
        new_scales = exp.astype(jnp.uint8)
        return new_scales

    def _e8m0_to_dtype(self, x, dtype):
        """Convert E8M0 format to specified data type.

        Args:
            x: Input in E8M0 format
            dtype: Target data type

        Returns:
            Converted values in target data type
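
        Example (illustrative): a stored exponent byte of 129 decodes to
        2**(129 - 127) = 4.0.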
        """
        temp = x.astype(jnp.uint32)
        exp = temp << 23
        new_x = exp.view(jnp.float32)
        near_zero_value = 2**-15 if dtype == jnp.float16 else 2**-127
        new_x = jnp.where(new_x == 0, jnp.array(near_zero_value, jnp.float32), new_x)
        return new_x.astype(dtype)


@register_pytree_node_class
@dataclass
class NVFP4Quantizer(Quantizer):
    """Quantizer implementation using current scaling.

    This quantizer uses current scaling mode with float32 scales

    Attributes:
        scaling_mode: Set to NVFP4_1D_SCALING or NVFP4_2D_SCALING
        q_layout: Quantization axis
        data_layout: Data layout string (default: "NT")
        stochastic_rounding_rng_state: RNG state for stochastic rounding, must be of shape (4,) and dtype uint32. If None, stochastic rounding is disabled.
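
    Example (illustrative sketch, assumes a JAX build with float4_e2m1fn):
        >>> quantizer = NVFP4Quantizer(q_dtype=jnp.float4_e2m1fn)
        >>> st = quantizer.quantize(jnp.ones((128, 64), jnp.bfloat16))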
    """

    scaling_mode: ScalingMode = ScalingMode.NVFP4_1D_SCALING
    q_layout: QuantizeLayout = QuantizeLayout.ROWWISE_COLWISE
    data_layout: str = "NT"
    stochastic_rounding_rng_state: Optional[jnp.ndarray] = None

    def __post_init__(self):
        assert (
            self.q_dtype == jnp.float4_e2m1fn
        ), "NVFP4 quantization must use a q_dtype of float4_e2m1fn"
        assert self.scaling_mode.is_nvfp4_scaling, "NVFP4Quantizer must use NVFP4 scaling modes"

    def _apply_stochastic_rounding(self, x):
        assert (
            self.stochastic_rounding_rng_state is not None
        ), "Stochastic rounding RNG state is not initialized"
        assert self.stochastic_rounding_rng_state.shape == (
            4,
        ), "Stochastic rounding RNG state must be of shape (4,)"
        assert (
            self.stochastic_rounding_rng_state.dtype == jnp.uint32
        ), "Stochastic rounding RNG state must be of dtype uint32"

        # Default RNG state in JAX expects 2x 32-bit integers, use first 2 uint32s for initial state and fold in the other 2 uint32s
        key_bits = jnp.array(
            [
                self.stochastic_rounding_rng_state[0],
                self.stochastic_rounding_rng_state[1],
            ],
            dtype=jnp.uint32,
        )
        key = jax.random.wrap_key_data(key_bits)
        key = jax.jit(jax.random.fold_in)(key, self.stochastic_rounding_rng_state[2])
        key = jax.jit(jax.random.fold_in)(key, self.stochastic_rounding_rng_state[3])

        abs_x = jnp.abs(x)
        sign_x = jnp.sign(x)

        floor = (
            (abs_x >= 0.5) * 0.5
            + (abs_x >= 1) * 0.5
            + (abs_x >= 2)
            + (abs_x >= 3)
            + (abs_x >= 4)
            + (abs_x >= 6) * 2
        )
        ceil = (
            0.5
            + (abs_x > 0.5) * 0.5
            + (abs_x > 1) * 1
            + (abs_x > 2)
            + (abs_x > 3)
            + (abs_x > 4) * 2
        )
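        # `floor`/`ceil` above are the lower/upper FP4 (E2M1) rounding targets that
        # bracket abs_x; `frac` is abs_x's fractional position within [floor, ceil],
        # and we round up exactly when the uniform sample falls at or below frac.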
        frac = (abs_x - floor) / (ceil - floor)

        rand = jax.random.uniform(key, abs_x.shape)
        return sign_x * jnp.where(frac >= rand, ceil, floor)

    def _quantize_func(self, x, is_colwise=False, dq_dtype=None, flatten_axis=-1) -> ScaledTensor1x:
        """Quantize function helper for block scaling FP8.

        Args:
            x: Input tensor to quantize
            is_colwise: Whether to use column-wise quantization
            dq_dtype: Data type for dequantized values
            flatten_axis: The quantization axis for the tensor

        Returns:
            A ScaledTensor1x containing the quantized data
        """
        # TODO(Phuong): use quantize_func from JAX
        if flatten_axis < 0:
            flatten_axis = x.ndim + flatten_axis
        assert (
            0 <= flatten_axis < x.ndim
        ), f"Invalid flatten_axis: {flatten_axis} for tensor of shape {x.shape}"

        should_apply_rht = self.scaling_mode == ScalingMode.NVFP4_1D_SCALING and is_colwise

        global_amax = None
        if isinstance(x, NoScaleTensor):
            # RHT changes the amax, so don't use the precalculated amax for
            # colwise 1D NVFP4 quantization with RHT.
            global_amax = x.amax if not should_apply_rht else None
            x = x.data

        # Transpose if required
        rowwise_flatten_axis = flatten_axis
        data_layout = self.data_layout[0]
        if is_colwise:
            x = jnp.transpose(x, (*range(flatten_axis, x.ndim), *range(flatten_axis)))
            data_layout = self.data_layout[1]
            # convert flatten_axis from N layout to T layout
            flatten_axis = x.ndim - flatten_axis
        x_shape = x.shape

        if should_use_rht(self.scaling_mode, is_colwise=is_colwise):
            # We only apply RHT for 1D colwise nvfp4
            x = apply_rht(x)

        dq_dtype = dq_dtype if dq_dtype is not None else x.dtype
        scale_shape = self.scaling_mode.get_scale_shape(
            x_shape,
            data_layout=data_layout,
            is_colwise=is_colwise,
            is_padded=False,
            flatten_axis=rowwise_flatten_axis,
        )
        scale_dtype = self.scaling_mode.get_scale_dtype()
        x = x.reshape(
            *x_shape[: flatten_axis - 1],
            scale_shape[flatten_axis - 1],
            int(x_shape[flatten_axis - 1] / scale_shape[flatten_axis - 1]),
            *x_shape[flatten_axis:-1],
            scale_shape[-1],
            int(x_shape[-1] / scale_shape[-1]),
        )

        # Dtype max constants
        DATA_DTYPE_MAX = jnp.finfo(self.q_dtype).max.astype(jnp.float32)
        SCALE_DTYPE_MAX = jnp.finfo(scale_dtype).max.astype(jnp.float32)

        # Level 1: Current Tensor Scaling
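        # The per-tensor scale maps global_amax to DATA_DTYPE_MAX * SCALE_DTYPE_MAX,
        # the largest magnitude representable as (FP4 element) * (FP8 block scale),
        # which keeps the per-block scales computed below within range.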
        global_amax = (
            global_amax
            if global_amax is not None
            else jnp.max(jnp.abs(x)).reshape((1,)).astype(jnp.float32)
        )
        tensor_scale = DATA_DTYPE_MAX * SCALE_DTYPE_MAX / global_amax
        tensor_scale = jnp.minimum(
            tensor_scale, jnp.array(jnp.finfo(jnp.float32).max, dtype=jnp.float32)
        )
        tensor_scale = jnp.where(
            tensor_scale == jnp.array(0.0, dtype=jnp.float32),
            jnp.array(1.0, dtype=jnp.float32),
            tensor_scale,
        )
        tensor_scale_inv = 1.0 / tensor_scale

        # Level 2: Block Scaling
        # After the reshape, the per-block dims sit at flatten_axis and -1
        block_amax = jnp.max(jnp.abs(x), axis=(flatten_axis, -1), keepdims=True).astype(
            jnp.float32
        )
        block_scale_inv = jnp.divide(block_amax, DATA_DTYPE_MAX)
        block_scale_inv = block_scale_inv * tensor_scale
        block_scale_inv = jnp.minimum(
            block_scale_inv, jnp.array(jnp.finfo(jnp.float32).max, dtype=jnp.float32)
        )
        block_scale_inv = jnp.clip(block_scale_inv, -SCALE_DTYPE_MAX, SCALE_DTYPE_MAX)
        # Cast block_scale_inv to scale_dtype here to account for any rounding during
        # the cast. This ensures the quantized data incorporates the rounded scale
        # value into its computation, so dequantization is accurate.
        block_scale_inv = block_scale_inv.astype(scale_dtype)
        # Note: with JIT, JAX removes this intermediate cast, leading to slightly
        # incorrect results during DQ and worse convergence to the original tensor
        # over many samples of Q+SR->DQ, so reduce_precision below simulates the
        # cast to scale_dtype.
        assert scale_dtype == jnp.float8_e4m3fn, "Only float8_e4m3fn is supported for scale_dtype"
        block_scale_inv = jax.lax.reduce_precision(block_scale_inv, 4, 3)
        block_scale = jnp.minimum(
            jnp.divide(1.0, block_scale_inv.astype(jnp.float32) * tensor_scale_inv),
            jnp.array(jnp.finfo(jnp.float32).max, dtype=jnp.float32),
        )

        # Apply scaling
        scaled_x = x.astype(jnp.float32) * block_scale
        if self.stochastic_rounding_rng_state is not None:
            scaled_x = self._apply_stochastic_rounding(scaled_x)
        clipped_x = jnp.clip(scaled_x, -DATA_DTYPE_MAX, DATA_DTYPE_MAX)

        # Cast to the right dtype
        quantized_data = clipped_x.reshape(x_shape).astype(self.q_dtype)
        block_scale_inv = block_scale_inv.reshape(scale_shape).astype(scale_dtype)

        # In the 2D scaling mode, the scale shape is 2D but it needs to be broadcasted to 1D for GEMM.
        # TODO(Phuong): expose this broadcast_2d_scale_shape_to_1d option to the
        # quantizer.quantize() API
        broadcasted_1d_scale_shape = self.scaling_mode.get_scale_shape(
            x_shape,
            data_layout=data_layout,
            is_colwise=is_colwise,
            is_padded=False,
            flatten_axis=rowwise_flatten_axis,
            broadcast_2d_scale_shape_to_1d=True,
        )

        # Broadcast and tile x to match the target shape
        def repeat_to_shape(x, target_shape):
            x_shape = x.shape
            reps = [int(t // s) for s, t in zip(x_shape, target_shape)]
            return jnp.tile(x, reps)

        block_scale_inv = repeat_to_shape(block_scale_inv, broadcasted_1d_scale_shape)

        return ScaledTensorFactory.create_1x(
            data=quantized_data,
            data_layout=data_layout,
            is_colwise=is_colwise,
            scale_inv=block_scale_inv,
            amax=global_amax,
            scaling_mode=self.scaling_mode,
            dq_dtype=dq_dtype,
            flatten_axis=rowwise_flatten_axis,
        )


@register_pytree_node_class
@dataclass
class QuantizerSet:
    """Set of quantizers for different tensor types.

    This class manages quantizers for input tensors, kernel tensors, and
    gradient tensors.

    Attributes:
        x: Quantizer for input tensors
        kernel: Quantizer for kernel tensors
        dgrad: Quantizer for gradient tensors
    """

    x: Optional[Quantizer]
    kernel: Optional[Quantizer]
    dgrad: Optional[Quantizer]

    def tree_flatten(self):
        """Flatten the quantizer set for JAX tree operations.

        Returns:
            Tuple of (children, aux_data) for tree operations
        """
        children = (self.x, self.kernel, self.dgrad)
        aux_data = ()
        return (children, aux_data)

    @classmethod
    def tree_unflatten(cls, aux_data, children):
        """Reconstruct a quantizer set from its flattened representation.

        Args:
            aux_data: Unused auxiliary data
            children: Tuple of quantizers

        Returns:
            A reconstructed QuantizerSet instance
        """
        return cls(*aux_data, *children)


@register_pytree_node_class
@dataclass
class GroupedQuantizer(Quantizer):
    """Quantizer for grouped arrays.

    This class extends Quantizer to support quantization of arrays in a grouped
    manner, where elements are grouped along a specified axis and then quantized
    separately.

    Attributes:
        data_layout: The data layout specification
        n_groups: Number of groups for quantization
        quantizers: Tuple of quantizers for each group
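
    Example (illustrative sketch; group sizes along axis 0 must sum to M):
        >>> quantizer = QuantizerFactory.create(
        ...     scaling_mode=ScalingMode.DELAYED_TENSOR_SCALING,
        ...     q_dtype=jnp.float8_e4m3fn,
        ...     q_layout=QuantizeLayout.ROWWISE,
        ...     n_groups=4,
        ... )
        >>> st = quantizer.quantize(x, group_sizes=jnp.array([3, 5, 2, 6]))  # x: [16, K]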
    """

    data_layout: str = None
    n_groups: int = 1
    quantizers: Tuple[Quantizer, ...] = field(default_factory=lambda: (None,))

    def tree_flatten(self):
        """Flatten the quantizer for JAX tree operations.

        Returns:
            Tuple of (children, aux_data) for tree operations
        """
        children = (self.quantizers,)
        aux_data = (self.q_dtype, self.scaling_mode, self.q_layout, self.data_layout, self.n_groups)
        return (children, aux_data)

    def __post_init__(self):
        if self.quantizers[0] is None:
            quantizers = QuantizerFactory.create(
                self.n_groups, self.scaling_mode, self.q_dtype, self.q_layout
            )
            self.quantizers = (quantizers,) if not isinstance(quantizers, tuple) else quantizers
        self.data_layout = self.quantizers[0].data_layout

    def _create_grouped_tensor_from_tensor_list(
        self, tensor_list, group_sizes, original_shape, group_axis, mode
    ):
        # mode 0 = concatenate, mode 1 = add
        # TODO(Ming Huang): Consider applying an Enum for mode.
        assert mode in [0, 1]
        grouped_data = (
            [] if mode == 0 else jnp.zeros(tensor_list[0].data.shape, tensor_list[0].data.dtype)
        )
        grouped_scale_inv = []

        for tensor in tensor_list:
            if mode == 0:
                grouped_data.append(tensor.data.flatten())
            else:
                grouped_data += tensor.data
            grouped_scale_inv.append(tensor.scale_inv.flatten())

        grouped_data = jnp.concatenate(grouped_data) if mode == 0 else grouped_data.flatten()
        grouped_scale_inv = jnp.concatenate(grouped_scale_inv)

        return ScaledTensorFactory.create_1x(
            grouped_data,
            grouped_scale_inv,
            scaling_mode=self.scaling_mode,
            dq_dtype=tensor_list[0].dq_dtype,
            is_colwise=tensor_list[0].is_colwise,
            data_layout=tensor_list[0].data_layout,
            flatten_axis=tensor_list[0].flatten_axis,
            group_sizes=group_sizes,
            original_shape=original_shape,
            group_axis=group_axis,
        )

    def _quantize_func(self, *args, **kwargs):
        """Not used directly; quantize() dispatches to the per-group quantizers."""

    def quantize(
        self,
        x,
        is_rowwise: bool = None,
        is_colwise: bool = None,
        dq_dtype=None,
        flatten_axis=-1,
        group_sizes=None,
        group_axis=0,
    ):
        """Quantize a tensor in grouped manner.

        Expected input shape: [M, K] or [G, K, N]
        Splits into x.shape[group_axis] groups if group_sizes is not given

        Args:
            x: Input tensor to quantize
            is_rowwise: Whether to use row-wise quantization
            is_colwise: Whether to use column-wise quantization
            dq_dtype: Data type for dequantized values
            flatten_axis: The axis along which the tensor could be flattened to 2D (default: -1)
            group_sizes: Array of ints containing the size of each group (default: None)
            group_axis: The axis along which grouping is performed (default: 0)

        Returns:
            A ScaledTensor1x or ScaledTensor2x containing the quantized data
        """
        assert group_axis == 0, "Only group_axis == 0 is supported now!"

        dq_dtype = dq_dtype if dq_dtype is not None else x.dtype
        if flatten_axis < 0:
            flatten_axis += x.ndim
        assert 0 < flatten_axis < x.ndim, "flatten_axis is out of bounds!"

        is_rowwise = (
            is_rowwise
            if is_rowwise is not None
            else (self.q_layout == QuantizeLayout.ROWWISE or self.is_2x2x())
        )
        is_colwise = (
            is_colwise
            if is_colwise is not None
            else (self.q_layout == QuantizeLayout.COLWISE or self.is_2x2x())
        )
        assert is_rowwise or is_colwise, "No quantization layout is specified"

        original_shape = x.shape

        if group_sizes is not None:
            assert not is_colwise, "Not yet implememted!"
            assert group_sizes.ndim == 1, (
                "GroupedQuantizer only support 1D group_sizes, got group_sizes.ndim ="
                f" {group_sizes.ndim}"
            )

            _zeros = partial(jax.lax.full_like, fill_value=0)

            x_iota = jax.lax.broadcasted_iota(group_sizes.dtype, x.shape, 0)
            group_ends = jnp.cumulative_sum(group_sizes)
            group_starts = jax.lax.concatenate(
                [_zeros(group_sizes)[:1], group_ends[:-1]],
                dimension=0,
            )
            x_zero = _zeros(x)

            tensor_list = []
            for i in range(len(group_sizes)):
                mask = jax.lax.bitwise_and(group_starts[i] <= x_iota, x_iota < group_ends[i])
                x_selected = jax.lax.select(mask, x, x_zero)
                tensor = self.quantizers[i].quantize(
                    x_selected, is_rowwise, is_colwise, dq_dtype, flatten_axis
                )
                tensor_list.append(tensor)
            combine_mode = 1  # Add
        else:
            group_sizes = jnp.ones(x.shape[group_axis], dtype=jnp.int32)
            x = jnp.split(x, x.shape[group_axis], axis=group_axis)

            tensor_list = []
            for i in range(len(group_sizes)):
                tensor = self.quantizers[i].quantize(
                    x[i], is_rowwise, is_colwise, dq_dtype, flatten_axis
                )
                tensor_list.append(tensor)
            combine_mode = 0  # Concatenate

        grouped_rowwise_tensor = grouped_colwise_tensor = None
        if is_rowwise:
            rowwise_tensor_list = [tensor.get_rowwise_tensor() for tensor in tensor_list]
            grouped_rowwise_tensor = self._create_grouped_tensor_from_tensor_list(
                rowwise_tensor_list, group_sizes, original_shape, group_axis, combine_mode
            )
        if is_colwise:
            colwise_tensor_list = [tensor.get_colwise_tensor() for tensor in tensor_list]
            grouped_colwise_tensor = self._create_grouped_tensor_from_tensor_list(
                colwise_tensor_list, group_sizes, original_shape, group_axis, combine_mode
            )

        if is_colwise and is_rowwise:
            return ScaledTensor2x(grouped_rowwise_tensor, grouped_colwise_tensor)
        if is_colwise:
            return grouped_colwise_tensor
        return grouped_rowwise_tensor

    def get_scale_shapes(self, data_shape, is_padded=True, flatten_axis=-1, group_sizes=None):
        assert group_sizes, "Empty group_sizes was given!"
        return self.scaling_mode.get_grouped_scale_shape_2x(
            data_shape, group_sizes, is_padded, flatten_axis
        )


@dataclass
class QuantizerFactory:
    """Factory class for creating quantizers.

    This class provides static methods to create individual quantizers and
    sets of quantizers with various configurations.
    """

    quantizer_type_map = {
        ScalingMode.DELAYED_TENSOR_SCALING: DelayedScaleQuantizer,
        ScalingMode.CURRENT_TENSOR_SCALING: CurrentScaleQuantizer,
        ScalingMode.MXFP8_1D_SCALING: BlockScaleQuantizer,
        ScalingMode.NVFP4_1D_SCALING: NVFP4Quantizer,
        ScalingMode.NVFP4_2D_SCALING: NVFP4Quantizer,
    }

    @staticmethod
    def create(
        n_quantizers: int = 1,
        scaling_mode: ScalingMode = None,
        q_dtype: jnp.dtype = None,
        q_layout: QuantizeLayout = None,
        n_groups: int = None,
        **kwargs,
    ) -> Quantizer:
        """Create one or more quantizers with specified parameters.

        Args:
            n_quantizers: Number of quantizers to create
            scaling_mode: Scaling mode to use
            q_dtype: Quantization data type
            q_layout: Quantization axis
            n_groups: If set, creates a GroupedQuantizer with this many groups
            **kwargs: Additional arguments for quantizer initialization

        Returns:
            A single quantizer or tuple of quantizers
        """
        assert isinstance(scaling_mode, ScalingMode), "Invalid scaling_mode type"
        if n_groups:
            if n_quantizers != 1:
                warnings.warn(
                    "Using more than one GroupedQuantizer for a grouped input is not recommended"
                )
            quantizer_type = GroupedQuantizer
            kwargs["n_groups"] = n_groups
        else:
            quantizer_type = QuantizerFactory.quantizer_type_map.get(scaling_mode)

        if scaling_mode == ScalingMode.NO_SCALING:
            quantizers = [None] * n_quantizers
        else:
            quantizers = []
            for _ in range(n_quantizers):
                quantizers.append(
                    quantizer_type(
                        q_dtype=q_dtype, scaling_mode=scaling_mode, q_layout=q_layout, **kwargs
                    )
                )
        return quantizers[0] if len(quantizers) == 1 else tuple(quantizers)

    @staticmethod
    def _create_set(
        x_scaling_mode,
        kernel_scaling_mode,
        grad_scaling_mode,
        fwd_dtype,
        bwd_dtype,
        is_2x2x,
        n_groups,
        **kwargs,
    ) -> QuantizerSet:
        """Create a set of quantizers for forward and backward passes.

        Args:
            x_scaling_mode: Scaling mode to use for input tensor 'x'
            kernel_scaling_mode: Scaling mode to use for kernel tensor
            grad_scaling_mode: Scaling mode to use for gradient tensor
            fwd_dtype: Data type for forward pass
            bwd_dtype: Data type for backward pass
            is_2x2x: Whether to use 2x2x quantization
            n_groups: Number of groups for grouped quantization (None for non-grouped)
            **kwargs: Additional arguments for quantizer initialization

        Returns:
            A QuantizerSet instance
        """
        if is_2x2x:
            q_layout_x = q_layout_kernel = q_layout_dgrad = QuantizeLayout.ROWWISE_COLWISE
        else:
            q_layout_x = q_layout_kernel = q_layout_dgrad = QuantizeLayout.ROWWISE
            if kernel_scaling_mode.is_1d_block_scaling():
                q_layout_kernel = QuantizeLayout.COLWISE
            if get_quantize_config().INFERENCE_MODE:
                q_layout_dgrad = None

        if "quantize_meta_set" in kwargs:
            quantize_meta_set = kwargs.get("quantize_meta_set")
            args_x = quantize_meta_set.x.get_kwargs_dictionary()
            args_kernel = quantize_meta_set.kernel.get_kwargs_dictionary()
            args_grad = quantize_meta_set.grad.get_kwargs_dictionary()
        else:
            args_x = args_kernel = args_grad = {}

        q_x = QuantizerFactory.create(1, x_scaling_mode, fwd_dtype, q_layout_x, n_groups, **args_x)
        q_kernel = QuantizerFactory.create(
            1, kernel_scaling_mode, fwd_dtype, q_layout_kernel, n_groups, **args_kernel
        )
        q_dgrad = QuantizerFactory.create(
            1, grad_scaling_mode, bwd_dtype, q_layout_dgrad, n_groups, **args_grad
        )
        return QuantizerSet(x=q_x, kernel=q_kernel, dgrad=q_dgrad)

    @staticmethod
    def create_set(
        n_quantizer_sets: int = 1,
        scaling_mode: Optional[ScalingMode] = None,
        fwd_dtype: jnp.dtype = None,
        bwd_dtype: jnp.dtype = None,
        is_2x2x: bool = None,
        n_groups: int = None,
        # TODO(jberchtold): rename fp8_recipe to quantization_recipe
        fp8_recipe: Optional[recipe.Recipe] = None,
        **kwargs,
    ) -> tuple[Union[tuple[Quantizer], None]]:
        """Create one or more sets of quantizers.

        Args:
            n_quantizer_sets: Number of quantizer sets to create
            scaling_mode: Scaling mode to use, default is get_quantize_config().get_scaling_mode
            fwd_dtype: Data type for forward pass, default is get_quantize_config().FWD_DTYPE
            bwd_dtype: Data type for backward pass, default is get_quantize_config().BWD_DTYPE
            is_2x2x: Whether to use 2x2x quantization, default is get_quantize_config().IF_QUANTIZE_2X
            n_groups: Number of groups for grouped quantization, default is None
            fp8_recipe: Recipe to use for quantization. The scaling mode can be specified
                directly via the scaling_mode parameter or indirectly via the recipe; the
                recipe is preferred, as it will support additional recipes in the future
                where the scaling mode differs between x, kernel, and grad in the
                quantizer set.
            **kwargs: Additional arguments for quantizer initialization

        Returns:
            A single quantizer set or tuple of quantizer sets
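
        Example (illustrative sketch, assumes FP8 support on the device):
            >>> qset = QuantizerFactory.create_set(fp8_recipe=recipe.DelayedScaling())
            >>> qset.x, qset.kernel, qset.dgrad  # one Quantizer per tensor role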
        """

        assert scaling_mode is None or fp8_recipe is None, (
            "Cannot specify both scaling_mode and fp8_recipe when creating a quantizer set. Scaling"
            " mode can be specified directly via the scaling_mode parameter or indirectly via"
            " recipe. Recipe is preferred as it will support additional recipes in future where"
            " scaling mode differs between x, kernel, and grad in the quantizer set."
        )

        if fp8_recipe is not None:
            quantize_config = get_quantize_config_with_recipe(fp8_recipe)
            x_scaling_mode = quantize_config.get_scaling_mode(TensorSource.X)
            kernel_scaling_mode = quantize_config.get_scaling_mode(TensorSource.KERNEL)
            grad_scaling_mode = quantize_config.get_scaling_mode(TensorSource.DGRAD)
            fwd_dtype = quantize_config.FWD_DTYPE
            bwd_dtype = quantize_config.BWD_DTYPE
        else:
            if scaling_mode is not None:
                x_scaling_mode = scaling_mode
                kernel_scaling_mode = scaling_mode
                grad_scaling_mode = scaling_mode
            else:
                x_scaling_mode = get_quantize_config().get_scaling_mode(TensorSource.X)
                kernel_scaling_mode = get_quantize_config().get_scaling_mode(TensorSource.KERNEL)
                grad_scaling_mode = get_quantize_config().get_scaling_mode(TensorSource.DGRAD)

            fwd_dtype = fwd_dtype or get_quantize_config().FWD_DTYPE
            bwd_dtype = bwd_dtype or get_quantize_config().BWD_DTYPE
        if is_2x2x is None:
            # TODO(Jeremy): check x, kernel, grad separately for 2x
            if x_scaling_mode.is_1d_block_scaling():
                is_2x2x = True
            elif x_scaling_mode.is_tensor_scaling():
                is_2x2x = not is_fp8_gemm_with_all_layouts_supported()
            else:  # NO_SCALING ignores is_2x2x for now
                is_2x2x = False
        is_inference_mode = get_quantize_config().INFERENCE_MODE
        assert not is_inference_mode, "Inference mode is not supported yet!"

        q_set = []
        for _ in range(n_quantizer_sets):
            q_set.append(
                QuantizerFactory._create_set(
                    x_scaling_mode=x_scaling_mode,
                    kernel_scaling_mode=kernel_scaling_mode,
                    grad_scaling_mode=grad_scaling_mode,
                    fwd_dtype=fwd_dtype,
                    bwd_dtype=bwd_dtype,
                    is_2x2x=is_2x2x,
                    n_groups=n_groups,
                    **kwargs,
                )
            )

        return q_set[0] if len(q_set) == 1 else tuple(q_set)


noop_quantizer_set = QuantizerFactory.create_set(scaling_mode=ScalingMode.NO_SCALING, is_2x2x=False)