/*************************************************************************
 * Copyright (c) 2022-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * See LICENSE for license information.
 ************************************************************************/

#ifndef TRANSFORMER_ENGINE_COMMON_COMMON_H_
#define TRANSFORMER_ENGINE_COMMON_COMMON_H_

#include <cudaTypedefs.h>
#define FP4_TYPE_SUPPORTED (CUDA_VERSION >= 12080)

#include <cuda_bf16.h>
#include <cuda_fp16.h>
#include <cuda_fp8.h>
#if FP4_TYPE_SUPPORTED
#include <cuda_fp4.h>
#endif

#include <cuda_runtime_api.h>
#include <transformer_engine/transformer_engine.h>

#include <cstdint>
#include <functional>
#include <limits>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>

#include "./nvtx.h"
#include "./util/cuda_driver.h"
#include "./util/logging.h"

namespace transformer_engine {

std::string to_string(const DType type);
std::string to_string(const NVTEScalingMode &mode);

inline bool is_tensor_scaling(const NVTEScalingMode &mode) {
  return mode == NVTE_DELAYED_TENSOR_SCALING;
}

inline bool is_block_scaling(const NVTEScalingMode &mode) { return !is_tensor_scaling(mode); }

inline bool is_delayed_tensor_scaling(const NVTEScalingMode &mode) {
  return mode == NVTE_DELAYED_TENSOR_SCALING;
}

inline bool is_nvfp4_scaling(const NVTEScalingMode &mode) { return mode == NVTE_NVFP4_1D_SCALING; }

inline bool is_mxfp8_scaling(const NVTEScalingMode &mode) { return mode == NVTE_MXFP8_1D_SCALING; }

inline bool is_mxfp_scaling(const NVTEScalingMode &mode) { return mode == NVTE_MXFP8_1D_SCALING; }

inline bool is_nvfp_scaling(const NVTEScalingMode &mode) { return mode == NVTE_NVFP4_1D_SCALING; }

inline size_t product(const std::vector<size_t> &shape, const size_t begin, const size_t end) {
  NVTE_CHECK(begin <= end && end <= shape.size(), "Attempted to access entries ", begin, " to ",
             end, " in a vector with ", shape.size(), " entries");
  size_t ret = 1;
  for (size_t i = begin; i < end; ++i) {
    ret *= shape[i];
  }
  return ret;
}

inline size_t product(const std::vector<size_t> &shape) {
  size_t ret = 1;
  for (const auto &elem : shape) {
    ret *= elem;
  }
  return ret;
}
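
// Example (illustrative): product({2, 3, 4}) == 24 and product({2, 3, 4}, 0, 2) == 6.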

size_t get_buffer_size_bytes(const size_t N, const DType buffer_dtype);
size_t get_buffer_size_bytes(const size_t dim_first, const size_t dim_last,
                             const DType buffer_dtype);

struct SimpleTensor {
  void *dptr;
  std::vector<size_t> shape;
  DType dtype;

  SimpleTensor(void *dptr, std::vector<size_t> shape, DType dtype)
      : dptr{dptr}, shape{std::move(shape)}, dtype{dtype} {}

  SimpleTensor(const NVTEBasicTensor &tensor)  // NOLINT
      : dptr(tensor.data_ptr),
        shape(tensor.shape.data, tensor.shape.data + tensor.shape.ndim),
        dtype(static_cast<DType>(tensor.dtype)) {}

  SimpleTensor() : SimpleTensor(nullptr, std::vector<size_t>{0}, DType::kFloat32) {}

  operator NVTEBasicTensor() const {
    return {dptr, static_cast<NVTEDType>(dtype),
            nvte_make_shape(this->shape.data(), this->shape.size())};
  }

  /*! Number of tensor elements. */
  size_t numel() const { return product(shape); }

  /*! Whether the tensor is initialized.
   *
   *  A tensor with a non-trivial shape is considered initialized,
   *  even if its data pointer is null, so initialization does not
   *  guarantee that the data pointer can be safely accessed.
   */
  bool has_data() const { return !(dptr == nullptr && shape.size() == 1 && shape[0] == 0); }

  /*! Buffer size in bytes. */
  size_t buffer_size_bytes() const { return get_buffer_size_bytes(numel(), dtype); }

  /*! Reset to uninitialized tensor. */
  void clear() {
    dptr = nullptr;
    shape.resize(1);
    shape[0] = 0;
    dtype = DType::kFloat32;
  }
};
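
// Example (illustrative, with a hypothetical device buffer "dev_ptr"):
//   SimpleTensor t{dev_ptr, {rows, cols}, DType::kBFloat16};
//   t.numel();                  // rows * cols
//   NVTEBasicTensor basic = t;  // implicit conversion for the C API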

struct Tensor {
 public:
  SimpleTensor data;
  SimpleTensor columnwise_data;
  SimpleTensor amax;
  SimpleTensor columnwise_amax;
  SimpleTensor scale;
  SimpleTensor scale_inv;
  SimpleTensor columnwise_scale_inv;

  NVTEScalingMode scaling_mode;
  NVTETensor nvte_tensor;
  /*! \brief Whether scaling factors are in format expected by GEMM
   *
   *  Only meaningful for MXFP8 and NVFP4.
   */
  bool with_gemm_swizzled_scales = false;

  /*! Map from NVTETensorParam to parameter sizes */
  static constexpr size_t attr_sizes[] = {
      sizeof(NVTEBasicTensor),  // kNVTERowwiseData
      sizeof(NVTEBasicTensor),  // kNVTEColumnwiseData
      sizeof(NVTEBasicTensor),  // kNVTEScale
      sizeof(NVTEBasicTensor),  // kNVTEAmax
      sizeof(NVTEBasicTensor),  // kNVTERowwiseScaleInv
      sizeof(NVTEBasicTensor),  // kNVTEColumnwiseScaleInv
      sizeof(NVTEBasicTensor),  // kNVTEColumnwiseAmax
      sizeof(uint8_t)           // kNVTEWithGEMMSwizzledScales
  };

  Tensor() : scaling_mode{NVTE_DELAYED_TENSOR_SCALING}, nvte_tensor{0} {}

  /*! Reset tensor data. */
  void clear() {
    data.clear();
    columnwise_data.clear();
    amax.clear();
    columnwise_amax.clear();
    scale.clear();
    scale_inv.clear();
    columnwise_scale_inv.clear();
    scaling_mode = NVTE_DELAYED_TENSOR_SCALING;
    with_gemm_swizzled_scales = false;
  }

  explicit operator NVTETensor() const noexcept { return nvte_tensor; }

  /*! Number of tensor elements. */
  size_t numel() const {
    if (!has_data() && has_columnwise_data()) {
      return product(columnwise_data.shape);
    }
    return product(data.shape);
  }

  /*! Whether the tensor data buffer is initialized.
   *
   *  A buffer with a non-trivial shape is considered initialized,
   *  even if its data pointer is null, so initialization does not
   *  guarantee that the data pointer can be safely accessed.
   */
  bool has_data() const { return data.has_data(); }

  /*! Whether the tensor column-wise data buffer is initialized.
   *
   *  A buffer with a non-trivial shape is considered initialized,
   *  even if its data pointer is null, so initialization does not
   *  guarantee that the data pointer can be safely accessed.
   */
  bool has_columnwise_data() const { return columnwise_data.has_data(); }

  /*! Datatype of tensor elements. */
  DType dtype() const {
    if (!has_data() && has_columnwise_data()) {
      return columnwise_data.dtype;
    }
    return data.dtype;
  }

  /*! Number of tensor dimensions. */
  size_t dim() const {
    if (!has_data() && has_columnwise_data()) {
      return columnwise_data.shape.size();
    }
    return data.shape.size();
  }

  /*! Tensor dimensions.
   *
   *  This is the logical tensor shape. The underlying data may have a
   *  different shape, e.g. the column-wise data for some tensor
   *  formats are transposed.
   */
  std::vector<size_t> shape() const {
    // Each tensor format interprets its data differently
    switch (scaling_mode) {
      case NVTE_DELAYED_TENSOR_SCALING:
      case NVTE_BLOCK_SCALING_1D:
      case NVTE_BLOCK_SCALING_2D:
      case NVTE_NVFP4_1D_SCALING: {
        // Row-wise data shape matches tensor logical shape,
        // column-wise data shape is transpose of logical shape
        if (!has_data() && has_columnwise_data()) {
          std::vector<size_t> ret;
          if (!columnwise_data.shape.empty()) {
            ret.reserve(columnwise_data.shape.size());
            for (size_t i = 1; i < columnwise_data.shape.size(); i++) {
              ret.push_back(columnwise_data.shape[i]);
            }
            ret.push_back(columnwise_data.shape.front());
          }
          return ret;
        }
        return data.shape;
      }
      case NVTE_MXFP8_1D_SCALING: {
        // Row-wise and column-wise data shapes both match tensor
        // logical shape
        if (!has_data() && has_columnwise_data()) {
          return columnwise_data.shape;
        }
        return data.shape;
      }
      default:
        NVTE_ERROR("Cannot parse tensor shape with scaling mode \"", to_string(scaling_mode), "\"");
    }
  }

  /*! Matrix height after tensor is flattened to 2D
   *
   * If a tensor has dimensions (D1, D2, ..., Dn), it is reinterpreted
   * as a (D1*D2*...*D(n-1), Dn) matrix.
   */
  size_t flat_first_dim() const {
    const auto &full_shape = shape();
    size_t ret = 1;
    if (!full_shape.empty()) {
      for (size_t i = 0; i < full_shape.size() - 1; i++) {
        ret *= full_shape[i];
      }
    }
    return ret;
  }

  /*! Matrix width after tensor is flattened to 2D
   *
   * If a tensor has dimensions (D1, D2, ..., Dn), it is reinterpreted
   * as a (D1*D2*...*D(n-1), Dn) matrix.
   */
  size_t flat_last_dim() const {
    const auto &full_shape = shape();
    if (full_shape.empty()) {
      return 1;
    } else {
      return full_shape.back();
    }
  }
};
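
// Example (illustrative): a row-wise tensor whose data.shape is {4, 8, 16} has
// shape() == {4, 8, 16}, dim() == 3, flat_first_dim() == 4 * 8 == 32, and
// flat_last_dim() == 16.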

struct GroupedTensor {
 public:
  /* EXPERIMENTAL FEATURE AND SUBJECT TO CHANGE. */
  /*
  A grouped tensor is a collection of tensors with different shapes but the same dtype and scaling mode.

  Shape Representation:
  - logical_shape: 2D shape representing the conceptual layout, i.e. the shape when member tensors are flattened to 2D and stacked together (REQUIRED)
    + When all_same_shape(): [num_tensors * M, N] where each tensor is (M, N)
    + When varying_first_dim(): [~sum_of_first_dims, N] where N is common
    + When varying_last_dim(): [M, ~sum_of_last_dims] where M is common
    + When varying_both_dims(): [1, total_elements] (fully flattened)

  - first_dims and last_dims are OPTIONAL (empty if dimension is uniform)
    + Empty first_dims: all tensors have the same first dimension
    + Empty last_dims: all tensors have the same last dimension
    + Both empty: all tensors have identical shapes
    + Both set: each tensor has unique shape (first_dims[i], last_dims[i])

  Data Layout:
  - ALL data fields are stored as 1D flattened arrays (data, columnwise_data, scale_inv, etc.)
  - logical_shape provides the conceptual 2D interpretation
  - All data is stored on device in contiguous layout
  */

  SimpleTensor data;
  SimpleTensor columnwise_data;
  SimpleTensor scale_inv;
  SimpleTensor columnwise_scale_inv;
  SimpleTensor amax;
  SimpleTensor columnwise_amax;
  SimpleTensor scale;  // for FP8-DS only

  NVTEScalingMode scaling_mode;
  size_t num_tensors;

  // Shape information (OPTIONAL - empty if dimension is uniform across all tensors)
  // first_dims[i] = first dimension of tensor i (empty if all tensors have same first dim)
  // last_dims[i] = last dimension of tensor i (empty if all tensors have same last dim)
  SimpleTensor first_dims;  // Device pointer to int64_t array of length num_tensors (or empty)
  SimpleTensor last_dims;   // Device pointer to int64_t array of length num_tensors (or empty)

  // Offsets for indexing into contiguous 1D layout (OPTIONAL - not needed if all_same_shape())
  // tensor_offsets[i] = element offset to start of tensor i (cumulative sum of numel for tensors 0..i-1)
  // Usage: tensor_i_ptr = (char*)data.dptr + tensor_offsets[i] * element_size
  // If empty and all_same_shape(): offset[i] = i * M * N (where M, N are common dimensions)
  SimpleTensor tensor_offsets;  // Device pointer to int64_t array of length num_tensors (or empty)

  // Logical shape: conceptual 2D shape of the grouped data (REQUIRED)
  // Represents how the 1D flattened data should be interpreted as 2D
  // Always 2D with positive dimensions
  NVTEShape logical_shape;

  NVTEGroupedTensor nvte_tensor;

  GroupedTensor(NVTEScalingMode scaling_mode, size_t num_tensors)
      : data(),
        columnwise_data(),
        scale_inv(),
        columnwise_scale_inv(),
        amax(),
        columnwise_amax(),
        scale(),
        scaling_mode(scaling_mode),
        num_tensors(num_tensors),
        first_dims(nullptr, std::vector<size_t>{0}, DType::kInt64),
        last_dims(nullptr, std::vector<size_t>{0}, DType::kInt64),
        tensor_offsets(nullptr, std::vector<size_t>{0}, DType::kInt64),
        logical_shape(nvte_make_shape(nullptr, 1)),
        nvte_tensor(0) {}

  explicit operator NVTEGroupedTensor() const noexcept { return nvte_tensor; }

  bool has_data() const noexcept { return data.has_data(); }
  bool has_columnwise_data() const noexcept { return columnwise_data.has_data(); }

  bool all_same_first_dim() const noexcept { return !first_dims.has_data(); }
  bool all_same_last_dim() const noexcept { return !last_dims.has_data(); }
  bool all_same_shape() const noexcept { return !first_dims.has_data() && !last_dims.has_data(); }
  bool varying_both_dims() const noexcept { return first_dims.has_data() && last_dims.has_data(); }

  size_t get_common_first_dim() const {
    NVTE_CHECK(all_same_first_dim(), "First dim varies across tensors");
    NVTE_CHECK(logical_shape.ndim == 2, "Logical shape must be 2D");
    if (all_same_shape()) {
      // When both dims are uniform: logical_shape = [num_tensors * M, N]
      return logical_shape.data[0] / num_tensors;
    } else {
      // When varying last dims but not first dim: logical_shape = [M, sum_of_last_dims]
      return logical_shape.data[0];
    }
  }
  size_t get_common_last_dim() const {
    NVTE_CHECK(all_same_last_dim(), "Last dim varies across tensors");
    NVTE_CHECK(logical_shape.ndim == 2, "Logical shape must be 2D");
    // For both uniform and varying first dim cases: logical_shape[1] is the common last dim
    return logical_shape.data[1];
  }
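
  // Example (illustrative): num_tensors == 3 member tensors, each of shape
  // (M, N) == (128, 64), give all_same_shape() == true, logical_shape ==
  // [3 * 128, 64], get_common_first_dim() == 128, and get_common_last_dim() == 64.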

  DType dtype() const {
    if (!has_data() && has_columnwise_data()) {
      return columnwise_data.dtype;
    }
    return data.dtype;
  }

  void clear() {
    data.clear();
    columnwise_data.clear();
    scale_inv.clear();
    columnwise_scale_inv.clear();
    amax.clear();
    columnwise_amax.clear();
    scale.clear();
    first_dims.clear();
    last_dims.clear();
    tensor_offsets.clear();
    logical_shape = nvte_make_shape(nullptr, 1);
    num_tensors = 0;
    scaling_mode = NVTE_DELAYED_TENSOR_SCALING;
    nvte_tensor = 0;
  }
};

struct QuantizationConfig {
  bool force_pow_2_scales = false;
  float amax_epsilon = 0.0f;
  NVTETensor noop_tensor = nullptr;
  NVTETensor rng_state = nullptr;
  bool nvfp4_2d_quantization = false;
  bool stochastic_rounding = false;
  bool use_fast_math = false;

  static constexpr size_t attr_sizes[] = {
      sizeof(uint8_t),                       // force_pow_2_scales
      sizeof(float),                         // amax_epsilon
      sizeof(NVTETensor),                    // noop_tensor
      sizeof(Float8BlockScaleTensorFormat),  // (deprecated)
      sizeof(NVTETensor),                    // rng_seed and offset
      sizeof(uint8_t),                       // nvfp4_2d_quantization
      sizeof(uint8_t),                       // stochastic_rounding
      sizeof(uint8_t)                        // use_fast_math
  };
};

cudaDataType_t get_cuda_dtype(const transformer_engine::DType t);

template <typename T>
constexpr T DIVUP(const T &x, const T &y) {
  return (x + y - 1) / y;
}

template <typename T1, typename T2>
constexpr __device__ __host__ __forceinline__ uint64_t DIVUP_TO_MULTIPLE(const T1 &N, const T2 &M) {
  static_assert(std::is_integral<T1>::value && std::is_integral<T2>::value,
                "Integral type required.");
  return DIVUP(static_cast<uint64_t>(N), static_cast<uint64_t>(M)) * M;
}
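
// Illustrative compile-time sanity checks (hypothetical values, not part of the API):
static_assert(DIVUP(10, 4) == 3, "DIVUP rounds up");
static_assert(DIVUP_TO_MULTIPLE(10, 4) == 12, "DIVUP_TO_MULTIPLE rounds 10 up to a multiple of 4");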

using byte = uint8_t;
using int16 = int16_t;
using int32 = int32_t;
using int64 = int64_t;
using fp32 = float;
using fp16 = half;
using bf16 = nv_bfloat16;
using fp8e4m3 = __nv_fp8_e4m3;
using fp8e5m2 = __nv_fp8_e5m2;
#if CUDA_VERSION >= 12080
using fp8e8m0 = __nv_fp8_e8m0;
#endif
#if FP4_TYPE_SUPPORTED
using fp4e2m1 = __nv_fp4_e2m1;
using fp4e2m1x2 = __nv_fp4x2_e2m1;
using fp4e2m1x4 = __nv_fp4x4_e2m1;
#endif
using e8m0_t = uint8_t;

namespace detail {

template <typename T>
constexpr inline const char *type_name() noexcept;
#define TRANSFORMER_ENGINE_TYPE_NAME(T)                  \
  template <>                                            \
  inline constexpr const char *type_name<T>() noexcept { \
    return #T;                                           \
  }
TRANSFORMER_ENGINE_TYPE_NAME(uint8_t)
TRANSFORMER_ENGINE_TYPE_NAME(int16_t)
TRANSFORMER_ENGINE_TYPE_NAME(int32_t)
TRANSFORMER_ENGINE_TYPE_NAME(int64_t)
TRANSFORMER_ENGINE_TYPE_NAME(float)
TRANSFORMER_ENGINE_TYPE_NAME(half)
TRANSFORMER_ENGINE_TYPE_NAME(nv_bfloat16)
TRANSFORMER_ENGINE_TYPE_NAME(__nv_fp8_e4m3)
TRANSFORMER_ENGINE_TYPE_NAME(__nv_fp8_e5m2)
#if CUDA_VERSION >= 12080
TRANSFORMER_ENGINE_TYPE_NAME(__nv_fp8_e8m0)
#endif
#if FP4_TYPE_SUPPORTED
TRANSFORMER_ENGINE_TYPE_NAME(__nv_fp4_e2m1)
#endif
#undef TRANSFORMER_ENGINE_TYPE_NAME

template <typename T>
struct TypeExtrema;

#if FP4_TYPE_SUPPORTED
template <>
struct TypeExtrema<fp4e2m1> {
  static constexpr float max = 6.0f;
  static constexpr float max_inverse = 1.0 / max;
};
#endif

template <>
struct TypeExtrema<fp8e4m3> {
  static constexpr float max = 448.0f;
  static constexpr float max_inverse = 1.0 / max;
};

template <>
struct TypeExtrema<fp8e5m2> {
  static constexpr float max = 57344.0f;
  static constexpr float max_inverse = 1.0 / max;
};

template <>
struct TypeExtrema<bf16> {
  // Hex float format of 1.(7 bits of 1) * 2 ^ 127
  static constexpr float max = 0x1.FEp127;
};

template <>
struct TypeExtrema<fp16> {
  // Hex float format of 1.(10 bits of 1) * 2 ^ 15
  static constexpr float max = 0x1.FFCp15;
};

template <typename T>
struct TypeExtrema {
  static constexpr float max = std::numeric_limits<T>::max();
};

}  // namespace detail

template <typename T>
struct BitsNumber;

#if FP4_TYPE_SUPPORTED
template <>
struct BitsNumber<fp4e2m1> {
  static constexpr size_t num_bits = 4;
};
#endif

template <typename T>
struct BitsNumber {
  static constexpr size_t num_bits = 8 * sizeof(T);
};

template <typename T>
struct TypeInfo {
#if FP4_TYPE_SUPPORTED
  using types = std::tuple<byte, int16, int32, int64, fp32, fp16, bf16, fp8e4m3, fp8e5m2, fp4e2m1
#if CUDA_VERSION >= 12080
                           ,
                           fp8e8m0
#endif
                           >;
#else
  using types = std::tuple<byte, int16, int32, int64, fp32, fp16, bf16, fp8e4m3, fp8e5m2
#if CUDA_VERSION >= 12080
                           ,
                           fp8e8m0
#endif
                           >;
#endif

  template <typename U, DType current>
  struct Helper {
    constexpr static DType getType() {
      constexpr int i = static_cast<int>(current);
      if (std::is_same<U, typename std::tuple_element<i, types>::type>::value) {
        return current;
      } else {
        return Helper<U, static_cast<DType>(i + 1)>::getType();
      }
    }
  };

  template <typename U>
  struct Helper<U, DType::kNumTypes> {
    constexpr static DType getType() { return DType::kNumTypes; }
  };

  template <typename U>
  constexpr static DType getType() {
    return Helper<U, DType::kByte>::getType();
  }

  constexpr static DType dtype = getType<T>();
  constexpr static size_t size = BitsNumber<T>::num_bits;
  constexpr static float max_finite_value = detail::TypeExtrema<T>::max;
  constexpr static const char *name = detail::type_name<T>();
};
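
// Illustrative compile-time queries (values follow from the definitions above):
//   TypeInfo<fp32>::size == 32                       (bits, via BitsNumber)
//   TypeInfo<fp8e4m3>::max_finite_value == 448.0f    (via detail::TypeExtrema)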

#if FP4_TYPE_SUPPORTED
#define SWITCH_FP4_TYPE_HANDLE(type, ...) \
  case DType::kFloat4E2M1: {              \
    using type = fp4e2m1;                 \
    { __VA_ARGS__ }                       \
  } break;
#else
#define SWITCH_FP4_TYPE_HANDLE(type, ...)  // do nothing
#endif

#define TRANSFORMER_ENGINE_TYPE_SWITCH_ALL(dtype, type, ...) \
  switch (dtype) {                                           \
    using namespace transformer_engine;                      \
    case DType::kByte: {                                     \
      using type = unsigned char;                            \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kInt16: {                                    \
      using type = int16_t;                                  \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kInt32: {                                    \
      using type = int32_t;                                  \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kInt64: {                                    \
      using type = int64_t;                                  \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kFloat32: {                                  \
      using type = float;                                    \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kFloat16: {                                  \
      using type = fp16;                                     \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kBFloat16: {                                 \
      using type = bf16;                                     \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kFloat8E4M3: {                               \
      using type = fp8e4m3;                                  \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kFloat8E5M2: {                               \
      using type = fp8e5m2;                                  \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kFloat8E8M0: {                               \
      using type = byte;                                     \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
      SWITCH_FP4_TYPE_HANDLE(type, __VA_ARGS__)              \
    default:                                                 \
      NVTE_ERROR("Invalid type.");                           \
  }
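
// Example (illustrative) of dispatching on a runtime DType; "my_kernel", "out",
// and "n" are hypothetical:
//   TRANSFORMER_ENGINE_TYPE_SWITCH_ALL(tensor.dtype(), type,
//       my_kernel<type><<<grid, block, 0, stream>>>(
//           static_cast<const type *>(tensor.data.dptr), out, n);)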

#define TRANSFORMER_ENGINE_TYPE_SWITCH_FLOAT(dtype, type, ...) \
  switch (dtype) {                                             \
    using namespace transformer_engine;                        \
    case DType::kFloat32: {                                    \
      using type = float;                                      \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kFloat16: {                                    \
      using type = fp16;                                       \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kBFloat16: {                                   \
      using type = bf16;                                       \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kFloat8E4M3: {                                 \
      using type = fp8e4m3;                                    \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kFloat8E5M2: {                                 \
      using type = fp8e5m2;                                    \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    default:                                                   \
      NVTE_ERROR("Invalid type.");                             \
  }

#define TRANSFORMER_ENGINE_TYPE_SWITCH_OUTPUT(dtype, type, ...) \
  switch (dtype) {                                              \
    using namespace transformer_engine;                         \
    case DType::kFloat32: {                                     \
      using type = float;                                       \
      { __VA_ARGS__ }                                           \
    } break;                                                    \
    case DType::kFloat16: {                                     \
      using type = fp16;                                        \
      { __VA_ARGS__ }                                           \
    } break;                                                    \
    case DType::kBFloat16: {                                    \
      using type = bf16;                                        \
      { __VA_ARGS__ }                                           \
    } break;                                                    \
    case DType::kFloat8E5M2: {                                  \
      using type = fp8e5m2;                                     \
      { __VA_ARGS__ }                                           \
    } break;                                                    \
    case DType::kFloat8E4M3: {                                  \
      using type = fp8e4m3;                                     \
      { __VA_ARGS__ }                                           \
    } break;                                                    \
    default:                                                    \
      NVTE_ERROR("Invalid type.");                              \
  }

#define TRANSFORMER_ENGINE_TYPE_SWITCH_NON_FP8ONLY(dtype, type, ...) \
  switch (dtype) {                                                   \
    using namespace transformer_engine;                              \
    case DType::kFloat32: {                                          \
      using type = float;                                            \
      { __VA_ARGS__ }                                                \
    } break;                                                         \
    case DType::kFloat16: {                                          \
      using type = fp16;                                             \
      { __VA_ARGS__ }                                                \
    } break;                                                         \
    case DType::kBFloat16: {                                         \
      using type = bf16;                                             \
      { __VA_ARGS__ }                                                \
    } break;                                                         \
    default:                                                         \
      NVTE_ERROR("Invalid type.");                                   \
  }

// The pack_size argument selects the packed type for FP4; currently only the
// 2-element packed storage type is handled.
#define TRANSFORMER_ENGINE_TYPE_SWITCH_FP4x2_ONLY(dtype, pack_size, type, ...) \
  switch (dtype) {                                                             \
    using namespace transformer_engine;                                        \
    case DType::kFloat4E2M1: {                                                 \
      using type = __nv_fp4x2_storage_t;                                       \
      { __VA_ARGS__ }                                                          \
    } break;                                                                   \
    default:                                                                   \
      NVTE_ERROR("Invalid type.");                                             \
  }

#define TRANSFORMER_ENGINE_TYPE_SWITCH_FP8ONLY(dtype, type, ...) \
  switch (dtype) {                                               \
    using namespace transformer_engine;                          \
    case DType::kFloat8E5M2: {                                   \
      using type = fp8e5m2;                                      \
      { __VA_ARGS__ }                                            \
    } break;                                                     \
    case DType::kFloat8E4M3: {                                   \
      using type = fp8e4m3;                                      \
      { __VA_ARGS__ }                                            \
    } break;                                                     \
    default:                                                     \
      NVTE_ERROR("Invalid type.");                               \
  }

#define TRANSFORMER_ENGINE_TYPE_SWITCH_INPUT(dtype, type, ...) \
  switch (dtype) {                                             \
    using namespace transformer_engine;                        \
    case DType::kFloat32: {                                    \
      using type = float;                                      \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kFloat16: {                                    \
      using type = fp16;                                       \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kBFloat16: {                                   \
      using type = bf16;                                       \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kFloat8E5M2:                                   \
    case DType::kFloat8E4M3: {                                 \
      NVTE_ERROR("FP8 type not instantiated for input.");      \
    } break;                                                   \
    case DType::kFloat4E2M1: {                                 \
      NVTE_ERROR("FP4 type not instantiated for input.");      \
    } break;                                                   \
    default:                                                   \
      NVTE_ERROR("Invalid type.");                             \
  }

#define TRANSFORMER_ENGINE_TYPE_SWITCH_16BIT(dtype, type, ...) \
  switch (dtype) {                                             \
    using namespace transformer_engine;                        \
    case DType::kFloat16: {                                    \
      using type = fp16;                                       \
      __VA_ARGS__;                                             \
      break;                                                   \
    }                                                          \
    case DType::kBFloat16: {                                   \
      using type = bf16;                                       \
      __VA_ARGS__;                                             \
      break;                                                   \
    }                                                          \
    default:                                                   \
      NVTE_ERROR("Invalid type for 16 bit.");                  \
  }

#define TRANSFORMER_ENGINE_MX_SCALE_DIM_SWITCH(SCALE_DIM, DIM, ...) \
  switch (SCALE_DIM) {                                              \
    case 1: {                                                       \
      constexpr size_t DIM = 1;                                     \
      { __VA_ARGS__ }                                               \
    } break;                                                        \
    case 32: {                                                      \
      constexpr size_t DIM = 32;                                    \
      { __VA_ARGS__ }                                               \
    } break;                                                        \
    default: {                                                      \
      NVTE_ERROR("Invalid size of the MX scaling factor.");         \
    }                                                               \
  }

#define TRANSFORMER_ENGINE_SWITCH_CONDITION(CONDITION, FLAG, ...) \
  if (CONDITION) {                                                \
    constexpr bool FLAG = true;                                   \
    { __VA_ARGS__ }                                               \
  } else {                                                        \
    constexpr bool FLAG = false;                                  \
    { __VA_ARGS__ }                                               \
  }
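
// Example (illustrative): turn a runtime flag into a compile-time constant;
// "use_noop" and "my_kernel" are hypothetical:
//   TRANSFORMER_ENGINE_SWITCH_CONDITION(use_noop, kUseNoop,
//       my_kernel<kUseNoop><<<grid, block, 0, stream>>>(args);)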

////////////////////////////////////////////////////////////////////////////////////////////////////

inline int log2_ceil(int value) {
  int log2_value = 0;
  while ((1 << log2_value) < value) ++log2_value;
  return log2_value;
}

template <size_t B>
inline size_t alignTo(size_t x) {
  size_t r = x % B;
  if (r == 0) return x;

  return x + B - r;
}
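
// Example (illustrative): alignTo<16>(30) == 32 and alignTo<16>(32) == 32.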

template <typename T>
struct is_fp8 : std::false_type {};

template <>
struct is_fp8<fp8e4m3> : std::true_type {};

template <>
struct is_fp8<fp8e5m2> : std::true_type {};

template <typename T>
struct is_fp4 : std::false_type {};

#if FP4_TYPE_SUPPORTED
template <>
struct is_fp4<fp4e2m1> : std::true_type {};
#endif

// [128,4] rowwise and [4,128] colwise alignment requirements for the tensor with scaling factors
constexpr size_t scale_tensor_alignment_X_rowwise = 4;
constexpr size_t scale_tensor_alignment_Y_rowwise = 128;
constexpr size_t scale_tensor_alignment_X_colwise = 128;
constexpr size_t scale_tensor_alignment_Y_colwise = 4;

// Alignment requirements for the Tensor Memory Accelerator (TMA)
constexpr size_t TMA_GMEM_ALIGNMENT = 16;    // global memory address alignment
constexpr size_t TMA_SHMEM_ALIGNMENT = 128;  // shared memory address alignment

inline bool is_aligned_ptr(const void *ptr, size_t alignment) {
  return reinterpret_cast<uintptr_t>(ptr) % alignment == 0;
}

inline bool is_aligned_tensor_data(const Tensor &t, size_t alignment) {
  return is_aligned_ptr(static_cast<const void *>(t.data.dptr), alignment);
}

size_t typeToSize(const DType type);
size_t typeToNumBits(const DType type);

void CheckNoopTensor(const Tensor &t, const std::string &name);
void CheckInputTensor(const Tensor &t, const std::string &name);
void CheckOutputTensor(const Tensor &t, const std::string &name, bool allow_empty = false);

/*! \brief Update a tensor's FP8 scale-inverse
 *
 * The FP8 scale-inverse (dequantization scaling factor) is updated
 * with the reciprocal of the FP8 scale (quantization scaling factor).
 */
void update_tensor_scale_inv(Tensor *t, cudaStream_t stream);

#define NVTE_API_CALL(api_name) \
  transformer_engine::nvtx::NVTXWrapper _##api_name##_nvtx_wrapper(#api_name);

void checkCuDriverContext(CUstream stream);

CUtensorMapDataType get_CUtensorMapDataType(DType dtype);

// Set up parameters to create TMA descriptor.
void create_2D_tensor_map(
    CUtensorMap &tensorMap, const SimpleTensor &tensor, const uint64_t globalY,
    const uint64_t globalX, const uint32_t shmemY, const uint32_t shmemX,
    const uint32_t stride_elems, const uint32_t offset_elems, const size_t type_num_bits,
    const CUtensorMapSwizzle swizzle = CUtensorMapSwizzle::CU_TENSOR_MAP_SWIZZLE_NONE);

bool is_supported_by_CC_100();

std::vector<std::vector<Tensor *>> convert_tensor_array(NVTETensor **nvte_tensors,
                                                        size_t outer_size, size_t inner_size);

Tensor *convertNVTETensor(const NVTETensor tensor);
Tensor *convertNVTETensorCheck(const NVTETensor tensor);

GroupedTensor *convertNVTEGroupedTensor(const NVTEGroupedTensor tensor);
GroupedTensor *convertNVTEGroupedTensorCheck(const NVTEGroupedTensor tensor);

// Helper functions for GroupedTensor validation
void CheckGroupedTensorShapeArrays(const GroupedTensor &t, const std::string &name);
void CheckInputGroupedTensor(const GroupedTensor &t, const std::string &name);
void CheckOutputGroupedTensor(const GroupedTensor &t, const std::string &name,
                              bool allow_empty = false);

}  // namespace transformer_engine

#endif  // TRANSFORMER_ENGINE_COMMON_COMMON_H_