/*************************************************************************
 * Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * See LICENSE for license information.
 ************************************************************************/

#ifndef TRANSFORMER_ENGINE_COMMON_COMMON_H_
#define TRANSFORMER_ENGINE_COMMON_COMMON_H_
#ifndef __HIP_PLATFORM_AMD__
#include <cudaTypedefs.h>
#endif
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#include <cuda_fp8.h>
#include <cuda_runtime_api.h>
#include <transformer_engine/transformer_engine.h>

#include <cstdint>
#include <functional>
#include <limits>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include "./nvtx.h"

#ifdef __HIP_PLATFORM_AMD__
#include "./util/hip_driver.h"
#else
#include "./util/cuda_driver.h"
#endif
#include "./util/logging.h"

namespace transformer_engine {

37
38
39
// Human-readable names for DType and scaling-mode values (defined elsewhere).
std::string to_string(const DType type);
std::string to_string(const NVTEScalingMode &mode);

40
41
42
43
44
45
46
47
48
49
50
51
// True for per-tensor scaling (a single scale for the whole tensor).
inline bool is_tensor_scaling(const NVTEScalingMode &mode) {
  return mode == NVTE_DELAYED_TENSOR_SCALING;
}

// Any mode that is not per-tensor scaling is treated as block scaling.
inline bool is_block_scaling(const NVTEScalingMode &mode) { return !is_tensor_scaling(mode); }

// Currently identical to is_tensor_scaling: delayed scaling is the only
// per-tensor mode handled here.
inline bool is_delayed_tensor_scaling(const NVTEScalingMode &mode) {
  return mode == NVTE_DELAYED_TENSOR_SCALING;
}

// True for MXFP8 1D scaling.
inline bool is_mxfp_scaling(const NVTEScalingMode &mode) { return mode == NVTE_MXFP8_1D_SCALING; }

52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
// Product of shape[begin, end) — 1 for an empty range.
// NVTE_CHECK aborts with a diagnostic when [begin, end) is out of bounds.
inline size_t product(const std::vector<size_t> &shape, const size_t begin, const size_t end) {
  NVTE_CHECK(begin <= end && end <= shape.size(), "Attempted to access entries ", begin, " to ",
             end, " in a vector with ", shape.size(), " entries");
  size_t ret = 1;
  for (size_t i = begin; i < end; ++i) {
    ret *= shape[i];
  }
  return ret;
}

// Total number of elements implied by `shape` (1 for an empty shape).
inline size_t product(const std::vector<size_t> &shape) {
  size_t result = 1;
  for (size_t i = 0; i < shape.size(); ++i) {
    result *= shape[i];
  }
  return result;
}

70
71
72
73
74
// Minimal view of a device tensor: raw pointer + shape + element type.
// Does not own the memory it points to.
struct SimpleTensor {
  void *dptr;
  std::vector<size_t> shape;
  DType dtype;

  SimpleTensor(void *dptr, const std::vector<size_t> &shape, DType dtype)
      : dptr(dptr), shape(shape), dtype(dtype) {}

  // Implicit conversion from the C-API tensor descriptor.
  SimpleTensor(const NVTEBasicTensor &tensor)  // NOLINT
      : dptr(tensor.data_ptr),
        shape(tensor.shape.data, tensor.shape.data + tensor.shape.ndim),
        dtype(static_cast<DType>(tensor.dtype)) {}

  // Default: null pointer, empty shape, FP32.
  SimpleTensor() : SimpleTensor(nullptr, {}, DType::kFloat32) {}

  // Implicit conversion to the C-API tensor descriptor.
  operator NVTEBasicTensor() const {
    return {dptr, static_cast<NVTEDType>(dtype),
            nvte_make_shape(this->shape.data(), this->shape.size())};
  }

  // Total number of elements.
  // NOTE(review): accumulates in size_t but returns int — may truncate for
  // tensors with more than INT_MAX elements; confirm callers never hit this.
  int numel() const {
    size_t acc = 1;
    for (const auto &dim : shape) {
      acc *= dim;
    }
    return acc;
  }
};
Przemek Tredak's avatar
Przemek Tredak committed
98

99
100
struct Tensor {
  SimpleTensor data;
101
  SimpleTensor columnwise_data;
102
103
104
  SimpleTensor amax;
  SimpleTensor scale;
  SimpleTensor scale_inv;
105
106
  SimpleTensor columnwise_scale_inv;

107
 public:
108
  NVTEScalingMode scaling_mode;
109

110
111
  Tensor()
      : data(),
112
        columnwise_data(),
113
114
        amax(nullptr, {1}, DType::kFloat32),
        scale(nullptr, {1}, DType::kFloat32),
115
116
117
118
        scale_inv(nullptr, {1}, DType::kFloat32),
        columnwise_scale_inv(nullptr, {1}, DType::kFloat32),
        scaling_mode(NVTE_DELAYED_TENSOR_SCALING) {}

119
  size_t numel() const {
120
    size_t acc = 1;
121
    for (const auto dim : shape()) {
122
123
124
125
126
127
128
      acc *= dim;
    }
    return acc;
  }

  bool has_data() const noexcept { return data.dptr != nullptr; }

129
130
131
132
  // Check for size (not just pointer) for 0-dim or no token cases.
  bool has_columnwise_data() const noexcept {
    return columnwise_data.dptr != nullptr || columnwise_data.shape.size() != 0;
  }
133
134
135
136
137
138
139
140

  DType dtype() const {
    if (has_data()) return data.dtype;
    if (has_columnwise_data()) return columnwise_data.dtype;
    // Fallback, used e.g. in workspace
    return data.dtype;
  }

141
142
143
144
145
146
147
148
  size_t dim() const {
    if (!has_data() && has_columnwise_data()) {
      return columnwise_data.shape.size();
    } else {
      return data.shape.size();
    }
  }

149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
  std::vector<size_t> shape() const {
    /* Note: We sometimes experience spurious compiler errors
     * (-Wstringop-overflow) from this function. It appears that GCC
     * has some bugs with std::vector (see
     * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=109569).
     */
    switch (scaling_mode) {
      case NVTE_DELAYED_TENSOR_SCALING:
        if (!has_data() && has_columnwise_data()) {
          std::vector<size_t> ret;
          if (!columnwise_data.shape.empty()) {
            for (size_t i = 1; i < columnwise_data.shape.size(); i++) {
              ret.push_back(columnwise_data.shape[i]);
            }
            ret.push_back(columnwise_data.shape.front());
          }
          return ret;
        } else {
          return data.shape;
        }
        break;
      case NVTE_MXFP8_1D_SCALING:
        if (!has_data() && has_columnwise_data()) {
          return columnwise_data.shape;
        } else {
          return data.shape;
        }
        break;
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
      case NVTE_BLOCK_SCALING_1D:
      case NVTE_BLOCK_SCALING_2D: {
        if (!has_data() && has_columnwise_data()) {
          std::vector<size_t> shape;
          size_t ndim = columnwise_data.shape.size();
          shape.reserve(ndim);
          for (size_t i = 0; i + 1 < ndim; ++i) {
            shape.push_back(columnwise_data.shape[i + 1]);
          }
          if (ndim > 0) {
            shape.push_back(columnwise_data.shape[0]);
          }
          return shape;
        } else {
          // NOTE: We may have removed the data pointer from
          // data by setting usage. In that case, we return
          // the non-null shape. It is our best guess at the most
          // recent shape.
          return data.shape;
        }
        break;
      }
199
200
201
202
203
204
      default:
        NVTE_ERROR("Cannot parse tensor shape with scaling mode \"", to_string(scaling_mode), "\"");
        return {};
    }
  }

205
206
207
208
209
210
  /*! Matrix height after tensor is flattened to 2D
   *
   * If a tensor has dimensions (D1, D2, ..., Dn), it is reinterpreted
   * as a (D1*D2*...*D(n-1), Dn) matrix.
   */
  size_t flat_first_dim() const {
211
212
213
214
215
    const auto &full_shape = shape();
    size_t ret = 1;
    if (!full_shape.empty()) {
      for (size_t i = 0; i < full_shape.size() - 1; i++) {
        ret *= full_shape[i];
216
217
      }
    }
218
    return ret;
219
220
221
222
223
224
225
226
  }

  /*! Matrix width after tensor is flattened to 2D
   *
   * If a tensor has dimensions (D1, D2, ..., Dn), it is reinterpreted
   * as a (D1*D2*...*D(n-1), Dn) matrix.
   */
  size_t flat_last_dim() const {
227
228
229
230
231
    const auto &full_shape = shape();
    if (full_shape.empty()) {
      return 1;
    } else {
      return full_shape.back();
232
233
    }
  }
Przemek Tredak's avatar
Przemek Tredak committed
234
235
};

236
237
238
// Options controlling quantization behavior, get/set through the C API.
struct QuantizationConfig {
  // Restrict computed scales to powers of two.
  bool force_pow_2_scales = false;
  // Epsilon applied to amax during scale computation.
  // NOTE(review): exact semantics (e.g. lower bound on amax) live in the
  // quantization kernels — confirm there.
  float amax_epsilon = 0.0f;
  // Optional no-op flag tensor — presumably lets kernels skip work when set;
  // verify against kernel code.
  NVTETensor noop_tensor = nullptr;

  // Byte sizes of the attributes above, indexed in declaration order
  // (used by the attribute get/set C API).
  static constexpr size_t attr_sizes[] = {
      sizeof(bool),       // force_pow_2_scales
      sizeof(float),      // amax_epsilon
      sizeof(NVTETensor)  // noop_tensor
  };
};

Przemek Tredak's avatar
Przemek Tredak committed
248
249
// Integer division of x by y, rounded up (for non-negative x, positive y).
template <typename T>
constexpr T DIVUP(const T &x, const T &y) {
  const T rounded_up = x + (y - 1);
  return rounded_up / y;
}

using byte = uint8_t;
254
using int16 = int16_t;
Przemek Tredak's avatar
Przemek Tredak committed
255
using int32 = int32_t;
256
using int64 = int64_t;
Przemek Tredak's avatar
Przemek Tredak committed
257
258
using fp32 = float;
using fp16 = half;
yuguo's avatar
yuguo committed
259
using int8 = int8_t;
yuguo's avatar
yuguo committed
260
#ifndef __HIP_PLATFORM_AMD__
Przemek Tredak's avatar
Przemek Tredak committed
261
262
263
using bf16 = nv_bfloat16;
using fp8e4m3 = __nv_fp8_e4m3;
using fp8e5m2 = __nv_fp8_e5m2;
yuguo's avatar
yuguo committed
264
#else
yuguo's avatar
yuguo committed
265
using bf16 = __hip_bfloat16;
266
267
using fp8e4m3 = te_hip_fp8_e4m3;
using fp8e5m2 = te_hip_fp8_e5m2;
yuguo's avatar
yuguo committed
268
#endif
269
270
271
272
#if CUDA_VERSION >= 12080
using fp8e8m0 = __nv_fp8_e8m0;
#endif
using e8m0_t = uint8_t;
yuguo's avatar
yuguo committed
273
using int8 = int8_t;
Przemek Tredak's avatar
Przemek Tredak committed
274

Tim Moon's avatar
Tim Moon committed
275
276
277
278
namespace detail {

// Compile-time mapping from a C++ type to its printable name.
// Only the types registered via TRANSFORMER_ENGINE_TYPE_NAME below have a
// definition; other instantiations fail to link.
template <typename T>
constexpr inline const char *type_name() noexcept;
#define TRANSFORMER_ENGINE_TYPE_NAME(T)                  \
  template <>                                            \
  inline constexpr const char *type_name<T>() noexcept { \
    return #T;                                           \
  }
TRANSFORMER_ENGINE_TYPE_NAME(uint8_t)
TRANSFORMER_ENGINE_TYPE_NAME(int16_t)
TRANSFORMER_ENGINE_TYPE_NAME(int32_t)
TRANSFORMER_ENGINE_TYPE_NAME(int64_t)
TRANSFORMER_ENGINE_TYPE_NAME(float)
TRANSFORMER_ENGINE_TYPE_NAME(half)
#ifdef __HIP_PLATFORM_AMD__
TRANSFORMER_ENGINE_TYPE_NAME(__hip_bfloat16)
TRANSFORMER_ENGINE_TYPE_NAME(te_hip_fp8_e4m3)
TRANSFORMER_ENGINE_TYPE_NAME(te_hip_fp8_e5m2)
#else
TRANSFORMER_ENGINE_TYPE_NAME(nv_bfloat16)
TRANSFORMER_ENGINE_TYPE_NAME(__nv_fp8_e4m3)
TRANSFORMER_ENGINE_TYPE_NAME(__nv_fp8_e5m2)
#endif
#if CUDA_VERSION >= 12080
TRANSFORMER_ENGINE_TYPE_NAME(__nv_fp8_e8m0)
#endif
#undef TRANSFORMER_ENGINE_TYPE_NAME

304
305
306
307
308
template <typename T>
struct TypeExtrema;

template <>
struct TypeExtrema<fp8e4m3> {
yuguo's avatar
yuguo committed
309
#ifndef __HIP_PLATFORM_AMD__
310
  static constexpr float max = 448.0f;
yuguo's avatar
yuguo committed
311
312
313
#else
  static constexpr float max = 240.0f;
#endif
314
315
};

yuguo's avatar
yuguo committed
316
317
318
319
320
template <>
struct TypeExtrema<int8> {
  static constexpr float max = 127.0f;
};

321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
template <>
struct TypeExtrema<fp8e5m2> {
  static constexpr float max = 57344.0f;
};

template <>
struct TypeExtrema<bf16> {
  // Hex float format of 1.(7 bits of 1) * 2 ^ 127
  static constexpr float max = 0x1.FEp127;
};

template <>
struct TypeExtrema<fp16> {
  // Hex float format of 1.(10 bits of 1) * 2 ^ 15
  static constexpr float max = 0x1.FFCp15;
};

template <typename T>
struct TypeExtrema {
  static constexpr float max = std::numeric_limits<T>::max();
};

Tim Moon's avatar
Tim Moon committed
343
}  // namespace detail
Przemek Tredak's avatar
Przemek Tredak committed
344
345

// Compile-time metadata about element type T: its DType enum value, size,
// largest finite value, and printable name.
template <typename T>
struct TypeInfo {
  // Tuple order must match the DType enum starting at kByte.
  using types = std::tuple<byte, int16, int32, int64, fp32, fp16, bf16, fp8e4m3, fp8e5m2, int8>;

  // Linear search over `types`: returns `current` when U matches the tuple
  // element at that index, otherwise recurses with the next enum value.
  template <typename U, DType current>
  struct Helper {
    constexpr static DType getType() {
      constexpr int i = static_cast<int>(current);
      if (std::is_same<U, typename std::tuple_element<i, types>::type>::value) {
        return current;
      } else {
        return Helper<U, static_cast<DType>(i + 1)>::getType();
      }
    }
  };

  // Recursion terminator: U was not found, report kNumTypes.
  template <typename U>
  struct Helper<U, DType::kNumTypes> {
    constexpr static DType getType() { return DType::kNumTypes; }
  };

  // DType enum value corresponding to U (kNumTypes if unknown).
  template <typename U>
  constexpr static DType getType() {
    return Helper<U, DType::kByte>::getType();
  }

  constexpr static DType dtype = getType<T>();
  constexpr static size_t size = sizeof(T);
  // Largest finite value of T (see detail::TypeExtrema).
  constexpr static float max_finite_value = detail::TypeExtrema<T>::max;
  constexpr static const char *name = detail::type_name<T>();
};

// Dispatch on every supported DType: binds `type` to the matching C++ type
// and executes __VA_ARGS__. kFloat8E8M0 is handled as raw bytes.
// NOTE(review): there is no kInt8 case even though int8 appears in
// TypeInfo::types — confirm whether that is intentional.
#define TRANSFORMER_ENGINE_TYPE_SWITCH_ALL(dtype, type, ...) \
  switch (dtype) {                                           \
    using namespace transformer_engine;                      \
    case DType::kByte: {                                     \
      using type = unsigned char;                            \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kInt16: {                                    \
      using type = int16_t;                                  \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kInt32: {                                    \
      using type = int32_t;                                  \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kInt64: {                                    \
      using type = int64_t;                                  \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kFloat32: {                                  \
      using type = float;                                    \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kFloat16: {                                  \
      using type = fp16;                                     \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kBFloat16: {                                 \
      using type = bf16;                                     \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kFloat8E4M3: {                               \
      using type = fp8e4m3;                                  \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kFloat8E5M2: {                               \
      using type = fp8e5m2;                                  \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    case DType::kFloat8E8M0: {                               \
      using type = byte;                                     \
      { __VA_ARGS__ }                                        \
    } break;                                                 \
    default:                                                 \
      NVTE_ERROR("Invalid type.");                           \
  }
Przemek Tredak's avatar
Przemek Tredak committed
423

424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
// Dispatch on floating-point DTypes (FP32/FP16/BF16 and both FP8 formats).
#define TRANSFORMER_ENGINE_TYPE_SWITCH_FLOAT(dtype, type, ...) \
  switch (dtype) {                                             \
    using namespace transformer_engine;                        \
    case DType::kFloat32: {                                    \
      using type = float;                                      \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kFloat16: {                                    \
      using type = fp16;                                       \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kBFloat16: {                                   \
      using type = bf16;                                       \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kFloat8E4M3: {                                 \
      using type = fp8e4m3;                                    \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kFloat8E5M2: {                                 \
      using type = fp8e5m2;                                    \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    default:                                                   \
      NVTE_ERROR("Invalid type.");                             \
  }

Przemek Tredak's avatar
Przemek Tredak committed
451
// Dispatch on output DTypes: FP32/FP16/BF16 and both FP8 formats.
#define TRANSFORMER_ENGINE_TYPE_SWITCH_OUTPUT(dtype, type, ...) \
  switch (dtype) {                                              \
    using namespace transformer_engine;                         \
    case DType::kFloat32: {                                     \
      using type = float;                                       \
      { __VA_ARGS__ }                                           \
    } break;                                                    \
    case DType::kFloat16: {                                     \
      using type = fp16;                                        \
      { __VA_ARGS__ }                                           \
    } break;                                                    \
    case DType::kBFloat16: {                                    \
      using type = bf16;                                        \
      { __VA_ARGS__ }                                           \
    } break;                                                    \
    case DType::kFloat8E5M2: {                                  \
      using type = fp8e5m2;                                     \
      { __VA_ARGS__ }                                           \
    } break;                                                    \
    case DType::kFloat8E4M3: {                                  \
      using type = fp8e4m3;                                     \
      { __VA_ARGS__ }                                           \
    } break;                                                    \
    default:                                                    \
      NVTE_ERROR("Invalid type.");                              \
  }
Przemek Tredak's avatar
Przemek Tredak committed
477

478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
// Dispatch on non-FP8 floating-point DTypes (FP32/FP16/BF16).
#define TRANSFORMER_ENGINE_TYPE_SWITCH_NON_FP8ONLY(dtype, type, ...) \
  switch (dtype) {                                                   \
    using namespace transformer_engine;                              \
    case DType::kFloat32: {                                          \
      using type = float;                                            \
      { __VA_ARGS__ }                                                \
    } break;                                                         \
    case DType::kFloat16: {                                          \
      using type = fp16;                                             \
      { __VA_ARGS__ }                                                \
    } break;                                                         \
    case DType::kBFloat16: {                                         \
      using type = bf16;                                             \
      { __VA_ARGS__ }                                                \
    } break;                                                         \
    default:                                                         \
      NVTE_ERROR("Invalid type.");                                   \
  }

Przemek Tredak's avatar
Przemek Tredak committed
497
// Dispatch restricted to the two FP8 formats (E5M2, E4M3).
#define TRANSFORMER_ENGINE_TYPE_SWITCH_FP8ONLY(dtype, type, ...) \
  switch (dtype) {                                               \
    using namespace transformer_engine;                          \
    case DType::kFloat8E5M2: {                                   \
      using type = fp8e5m2;                                      \
      { __VA_ARGS__ }                                            \
    } break;                                                     \
    case DType::kFloat8E4M3: {                                   \
      using type = fp8e4m3;                                      \
      { __VA_ARGS__ }                                            \
    } break;                                                     \
    default:                                                     \
      NVTE_ERROR("Invalid type.");                               \
  }
Przemek Tredak's avatar
Przemek Tredak committed
511

yuguo's avatar
yuguo committed
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
// Dispatch on 8-bit DTypes: both FP8 formats plus int8.
#define TRANSFORMER_ENGINE_TYPE_SWITCH_8BIT(dtype, type, ...)    \
  switch (dtype) {                                               \
    using namespace transformer_engine;                          \
    case DType::kFloat8E5M2: {                                   \
      using type = fp8e5m2;                                      \
      { __VA_ARGS__ }                                            \
    } break;                                                     \
    case DType::kFloat8E4M3: {                                   \
      using type = fp8e4m3;                                      \
      { __VA_ARGS__ }                                            \
    } break;                                                     \
    case DType::kInt8: {                                         \
      using type = int8;                                         \
      { __VA_ARGS__ }                                            \
    } break;                                                     \
    default:                                                     \
      NVTE_ERROR("Invalid type.");                               \
  }

Przemek Tredak's avatar
Przemek Tredak committed
531
// Dispatch on input DTypes: FP32/FP16/BF16. FP8 inputs are rejected with a
// dedicated error to distinguish "not instantiated" from "invalid".
#define TRANSFORMER_ENGINE_TYPE_SWITCH_INPUT(dtype, type, ...) \
  switch (dtype) {                                             \
    using namespace transformer_engine;                        \
    case DType::kFloat32: {                                    \
      using type = float;                                      \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kFloat16: {                                    \
      using type = fp16;                                       \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kBFloat16: {                                   \
      using type = bf16;                                       \
      { __VA_ARGS__ }                                          \
    } break;                                                   \
    case DType::kFloat8E5M2:                                   \
    case DType::kFloat8E4M3: {                                 \
      NVTE_ERROR("FP8 type not instantiated for input.");      \
    } break;                                                   \
    default:                                                   \
      NVTE_ERROR("Invalid type.");                             \
  }

// Dispatch on 16-bit floating-point DTypes (FP16/BF16).
#define TRANSFORMER_ENGINE_TYPE_SWITCH_16BIT(dtype, type, ...) \
  switch (dtype) {                                             \
    using namespace transformer_engine;                        \
    case DType::kFloat16: {                                    \
      using type = fp16;                                       \
      __VA_ARGS__;                                             \
      break;                                                   \
    }                                                          \
    case DType::kBFloat16: {                                   \
      using type = bf16;                                       \
      __VA_ARGS__;                                             \
      break;                                                   \
    }                                                          \
    default:                                                   \
      NVTE_ERROR("Invalid type for 16 bit.");                  \
  }
570

571
572
573
574
575
576
577
578
579
580
581
582
583
// Lift a runtime MX scale dimension (1 or 32) into a constexpr DIM.
#define TRANSFORMER_ENGINE_MX_SCALE_DIM_SWITCH(SCALE_DIM, DIM, ...) \
  switch (SCALE_DIM) {                                              \
    case 1: {                                                       \
      constexpr size_t DIM = 1;                                     \
      { __VA_ARGS__ }                                               \
    } break;                                                        \
    case 32: {                                                      \
      constexpr size_t DIM = 32;                                    \
      { __VA_ARGS__ }                                               \
    } break;                                                        \
    default: {                                                      \
      NVTE_ERROR("Invalid size of the MX scaling factor.");         \
    }                                                               \
  }
585

586
587
588
589
590
591
592
593
594
// Lift a runtime boolean into a constexpr FLAG, instantiating the body twice.
#define TRANSFORMER_ENGINE_SWITCH_CONDITION(CONDITION, FLAG, ...) \
  if (CONDITION) {                                                \
    constexpr bool FLAG = true;                                   \
    { __VA_ARGS__ }                                               \
  } else {                                                        \
    constexpr bool FLAG = false;                                  \
    { __VA_ARGS__ }                                               \
  }

595
////////////////////////////////////////////////////////////////////////////////////////////////////
Przemek Tredak's avatar
Przemek Tredak committed
596

597
// Smallest k such that 2^k >= value (0 for value <= 1).
// Uses a 64-bit shift so the loop terminates for values above 2^30,
// where a 32-bit (1 << k) would overflow (undefined behavior).
inline int log2_ceil(int value) {
  int log2_value = 0;
  while ((1LL << log2_value) < value) ++log2_value;
  return log2_value;
}

603
604
605
606
607
608
609
610
// Round x up to the next multiple of B (returns x unchanged when aligned).
template <size_t B>
inline size_t alignTo(size_t x) {
  const size_t rem = x % B;
  return rem == 0 ? x : x + (B - rem);
}

Przemek Tredak's avatar
Przemek Tredak committed
611
612
613
614
615
616
617
618
619
// Type trait: true only for the FP8 element types (E4M3, E5M2).
template <typename T>
struct is_fp8 : std::false_type {};

template <>
struct is_fp8<fp8e4m3> : std::true_type {};

template <>
struct is_fp8<fp8e5m2> : std::true_type {};

620
621
622
623
624
625
// [128,4] rowwise and [4,128] colwise alignment requirements for the tensor with scaling factors
constexpr size_t scale_tensor_alignment_X_rowwise = 4;
constexpr size_t scale_tensor_alignment_Y_rowwise = 128;
constexpr size_t scale_tensor_alignment_X_colwise = 128;
constexpr size_t scale_tensor_alignment_Y_colwise = 4;

626
627
628
629
630
631
632
633
634
635
636
// Alignment requirements for the Tensor Memory Accelerator (TMA)
constexpr int TMA_gmem_alignment = 16;  // global memory address alignment

// True when `ptr` is aligned to `alignment` bytes.
inline bool is_aligned_ptr(const void *ptr, size_t alignment) {
  return reinterpret_cast<uintptr_t>(ptr) % alignment == 0;
}

// True when the tensor's row-wise data pointer meets the alignment.
inline bool is_aligned_tensor_data(const Tensor &t, size_t alignment) {
  return is_aligned_ptr(static_cast<const void *>(t.data.dptr), alignment);
}

Przemek Tredak's avatar
Przemek Tredak committed
637
638
// Size in bytes of one element of `type`.
size_t typeToSize(const DType type);

// Validate the optional no-op flag tensor; `name` is used in error messages.
void CheckNoopTensor(const Tensor &t, const std::string &name);
// Validate tensors passed into / produced by kernels; `name` is used in
// error messages.
void CheckInputTensor(const Tensor &t, const std::string &name);
void CheckOutputTensor(const Tensor &t, const std::string &name, bool allow_empty = false);

// True for FP8 element DTypes.
bool is_fp8_dtype(const DType t);

/*! \brief Update a tensor's FP8 scale-inverse
 *
 * The FP8 scale-inverse (dequantization scaling factor) is updated
 * with the reciprocal of the FP8 scale (quantization scaling factor).
 */
void update_tensor_scale_inv(Tensor *t, cudaStream_t stream);

652
// Open an NVTX range named after the enclosing API function for profiling.
#define NVTE_API_CALL(api_name) \
  transformer_engine::nvtx::NVTXWrapper _##api_name##_nvtx_wrapper(#api_name);

// Ensure a valid CUDA driver context is current for the given stream.
// NOTE(review): exact behavior lives in the definition — confirm there.
void checkCuDriverContext(CUstream stream);

yuguo's avatar
yuguo committed
657
#ifndef __HIP_PLATFORM_AMD__
// Map a DType to the corresponding TMA descriptor data type.
CUtensorMapDataType get_CUtensorMapDataType(DType dtype);

// Set up parameters to create TMA descriptor.
void create_2D_tensor_map(CUtensorMap &tensorMap, const SimpleTensor &tensor,
                          const uint64_t globalY, const uint64_t globalX, const uint32_t shmemY,
                          const uint32_t shmemX, const uint32_t stride_elems,
                          const uint32_t offset_elems, const size_t type_size);
#endif
666
667
668

// Whether the current device supports the features gated on compute
// capability 10.0 — NOTE(review): inferred from the name; confirm in the
// definition.
bool is_supported_by_CC_100();

// View a C array-of-arrays of NVTETensor handles as nested Tensor pointers.
std::vector<std::vector<Tensor *>> convert_tensor_array(NVTETensor **nvte_tensors,
                                                        size_t outer_size, size_t inner_size);

Przemek Tredak's avatar
Przemek Tredak committed
672
673
674
}  // namespace transformer_engine

#endif  // TRANSFORMER_ENGINE_COMMON_COMMON_H_