/*************************************************************************
 * Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * See LICENSE for license information.
 ************************************************************************/

#include <cuda_runtime.h>
#include <transformer_engine/cast_transpose_noop.h>
#include <transformer_engine/transpose.h>

#include <algorithm>

#include "../util/rtc.h"
#include "../util/string.h"
#include "../utils.cuh"
#include "cast_transpose.h"

namespace transformer_engine::detail {

namespace {

// String with RTC kernel implementation
#include "string_code_transpose_rtc_cast_transpose_cu.h"

// Hard-coded kernel parameters
using CType = float;
constexpr size_t warps_per_tile = 4;
constexpr size_t block_size = THREADS_PER_WARP * warps_per_tile;
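// Note: Each CUDA block processes one tile with warps_per_tile cooperating
// warps. THREADS_PER_WARP is 32 on current NVIDIA GPUs, so block_size works
// out to 128 threads per block.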

/* Performance heuristics for optimized kernel parameters */
struct KernelConfig {
  /* Vector load size */
  size_t load_size = 0;
  /* Vector store size to transposed output */
  size_t store_size = 0;

  /* Whether config is valid */
  bool valid = false;
  /* Number of CUDA blocks */
  size_t num_blocks = 0;

  /* Number of active SMs */
  size_t active_sm_count = 0;
  /* Elements per L1 cache load */
  size_t elements_per_load = 0;
  /* Elements per L1 cache store to cast output */
  size_t elements_per_store_c = 0;
  /* Elements per L1 cache store to transposed output */
  size_t elements_per_store_t = 0;

  KernelConfig(size_t row_length, size_t num_rows, size_t itype_size, size_t otype_size,
               size_t load_size_, size_t store_size_, size_t sm_count)
      : load_size{load_size_}, store_size{store_size_} {
    // Check that tiles are correctly aligned
    constexpr size_t cache_line_size = 128;
    if (load_size % itype_size != 0 || store_size % otype_size != 0 ||
        cache_line_size % itype_size != 0 || cache_line_size % otype_size != 0) {
      return;
    }
    const size_t row_tile_elements = load_size * THREADS_PER_WARP / itype_size;
    const size_t col_tile_elements = store_size * THREADS_PER_WARP / otype_size;
    valid = (row_length % row_tile_elements == 0 && num_rows % col_tile_elements == 0);
    if (!valid) {
      return;
    }

    // Number of CUDA blocks
    num_blocks = (row_length / row_tile_elements) * (num_rows / col_tile_elements);

    // Parameters for performance model
    constexpr size_t warps_per_sm = 16;  // Rough estimate for saturated SMs
    active_sm_count = std::min(DIVUP(num_blocks * warps_per_tile, warps_per_sm), sm_count);
    elements_per_load = (std::min(cache_line_size, row_tile_elements * itype_size) / itype_size);
    elements_per_store_c = (std::min(cache_line_size, row_tile_elements * otype_size) / otype_size);
    elements_per_store_t = (std::min(cache_line_size, col_tile_elements * otype_size) / otype_size);
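    // Note: The min() against cache_line_size models cache-line utilization:
    // a 128-byte line carries at most cache_line_size / itype_size contiguous
    // input elements, so larger elements_per_* values mean fewer memory
    // transactions per tile.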
  }

  /* Compare by estimated cost */
  bool operator<(const KernelConfig &other) const {
    if (this->valid && other.valid) {
      // cost ~ (1/elements_per_load
      //         + 1/elements_per_store_c
      //         + 1/elements_per_store_t) / active_sms
      // Note: Integer arithmetic ensures stable ordering
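      // Worked example (hypothetical values): a config with
      // (load, store_c, store_t, SMs) = (32, 32, 16, 108) has cost
      // ~ (1/32 + 1/32 + 1/16) / 108 = 0.125 / 108 and beats a config with
      // (8, 8, 8, 108), whose cost is 0.375 / 108. Multiplying through by the
      // common positive factor `scale` below makes the comparison exact in
      // integer arithmetic.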
      const auto &l1 = this->elements_per_load;
      const auto &sc1 = this->elements_per_store_c;
      const auto &st1 = this->elements_per_store_t;
      const auto &p1 = this->active_sm_count;
      const auto &l2 = other.elements_per_load;
      const auto &sc2 = other.elements_per_store_c;
      const auto &st2 = other.elements_per_store_t;
      const auto &p2 = other.active_sm_count;
      const auto scale = l1 * sc1 * st1 * p1 * l2 * sc2 * st2 * p2;
      const auto cost1 = (scale / l1 + scale / sc1 + scale / st1) / p1;
      const auto cost2 = (scale / l2 + scale / sc2 + scale / st2) / p2;
      return cost1 < cost2;
    } else {
      return this->valid && !other.valid;
    }
  }
};

template <size_t load_size, size_t store_size, typename IType, typename OType>
__global__ void __launch_bounds__(block_size) cast_transpose_general_kernel(
    const IType *__restrict__ const input, const CType *__restrict__ const noop,
    OType *__restrict__ const output_c, OType *__restrict__ const output_t,
    const CType *__restrict__ const scale_ptr, CType *__restrict__ const amax_ptr,
    CType *__restrict__ const scale_inv_ptr, const size_t row_length, const size_t num_rows) {
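  // Return immediately if the no-op flag is set. The flag lives in device
  // memory, so the decision happens at kernel run time rather than launch
  // time (useful, e.g., when the launch has been captured in a CUDA graph).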
  if (noop != nullptr && noop[0] == 1.0f) return;

  // Vectorized load/store sizes
  constexpr size_t nvec_in = load_size / sizeof(IType);
  constexpr size_t nvec_out = store_size / sizeof(OType);
  using IVec = Vec<IType, nvec_in>;
  using OVecT = Vec<OType, nvec_out>;

  // Thread indices
  // Note: Block is interpreted as a warp_size x num_warps grid
  constexpr size_t bdimx = THREADS_PER_WARP;
  constexpr size_t bdimy = warps_per_tile;
  const size_t tid = threadIdx.x;
  const size_t tidx = tid % bdimx;
  const size_t tidy = tid / bdimx;
  const size_t bid = blockIdx.x;

  // Input tensors are divided into tiles
  // Note: Each tile is a warp_size x warp_size grid of nvec_out x nvec_in subtiles
  constexpr size_t tile_dim_m = THREADS_PER_WARP * nvec_out;
  constexpr size_t tile_dim_n = THREADS_PER_WARP * nvec_in;
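  // Worked example (hypothetical types): load_size = 8 with 2-byte IType
  // gives nvec_in = 4, and store_size = 8 with 1-byte OType gives
  // nvec_out = 8, so each tile covers 256 rows x 128 columns of the input.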

  // Position of tile within tensor
  const size_t num_tiles_m = (num_rows + tile_dim_m - 1) / tile_dim_m;
  const size_t tile_id_m = bid % num_tiles_m;
  const size_t tile_id_n = bid / num_tiles_m;
  const size_t tile_row = tile_id_m * tile_dim_m;
  const size_t tile_col = tile_id_n * tile_dim_n;

  // Number of nvec_out x nvec_in subtiles for each thread to
  // load/store
  constexpr size_t num_iterations = THREADS_PER_WARP / warps_per_tile;

  // FP8 factors
  const CType scale = scale_ptr == nullptr ? 1 : *scale_ptr;
  CType amax = 0;

  // Load input and store to registers
  // Note: Each thread loads num_iterations subtiles, computes amax,
  // casts type, and transposes in registers.
  OVecT local_output_t[nvec_in][num_iterations];
#pragma unroll
  for (size_t iter = 0; iter < num_iterations; ++iter) {
    const size_t i1 = tidy + iter * bdimy;
    const size_t j1 = tidx;
#pragma unroll
    for (size_t i2 = 0; i2 < nvec_out; ++i2) {
      const size_t row = tile_row + i1 * nvec_out + i2;
      const size_t col = tile_col + j1 * nvec_in;
      if (row < num_rows) {
#pragma unroll
        for (size_t j2 = 0; j2 < nvec_in; ++j2) {
          if (col + j2 < row_length) {
            const CType in = input[row * row_length + col + j2];
            const OType out = OType(in * scale);
            __builtin_assume(amax >= 0);
            amax = fmaxf(fabsf(in), amax);
            output_c[row * row_length + col + j2] = out;
            local_output_t[j2][iter].data.elt[i2] = out;
          }
        }
      }
    }
  }

  // Copy transposed output from registers to global memory
  __shared__ OVecT shared_output_t[THREADS_PER_WARP][THREADS_PER_WARP + 1];
#pragma unroll
  for (size_t j2 = 0; j2 < nvec_in; ++j2) {
#pragma unroll
    for (size_t iter = 0; iter < num_iterations; ++iter) {
      const size_t i1 = tidy + iter * bdimy;
      const size_t j1 = tidx;
      shared_output_t[j1][i1] = local_output_t[j2][iter];
    }
    __syncthreads();
#pragma unroll
    for (size_t iter = 0; iter < num_iterations; ++iter) {
      const size_t i1 = tidx;
      const size_t j1 = tidy + iter * bdimy;
      const size_t row = tile_row + i1 * nvec_out;
      const size_t col = tile_col + j1 * nvec_in + j2;
      if (col < row_length) {
#pragma unroll
        for (size_t i2 = 0; i2 < nvec_out; ++i2) {
          if (row + i2 < num_rows) {
            output_t[col * num_rows + row + i2] = shared_output_t[j1][i1].data.elt[i2];
          }
        }
      }
    }
    __syncthreads();
  }

  // Reduce amax over block
  if (amax_ptr != nullptr) {
    amax = reduce_max<warps_per_tile>(amax, tidy);
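    // Note: reduce_max combines amax across the block's warps; thread 0 then
    // merges the block result into the global amax via atomicMaxFloat, since
    // CUDA has no native atomicMax overload for float.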
    if (threadIdx.x == 0) {
      static_assert(std::is_same<CType, float>::value);
      atomicMaxFloat(amax_ptr, amax);
    }
  }

  // Update scale-inverse
  if (blockIdx.x == 0 && threadIdx.x == 0 && scale_inv_ptr != nullptr) {
    reciprocal<CType>(scale_inv_ptr, scale);
  }
}

}  // namespace

void cast_transpose(const Tensor &input, const Tensor &noop, Tensor *output_, cudaStream_t stream) {
  Tensor &output = *output_;

  CheckNoopTensor(noop, "cast_transpose_noop");
  CheckInputTensor(input, "cast_transpose_input");
  CheckOutputTensor(output, "cast_transpose_output");

  // Check that inputs and outputs are available
  NVTE_CHECK(input.has_data(), "Input is not allocated");
  NVTE_CHECK(output.has_data(), "Output rowwise data is not allocated");
  NVTE_CHECK(output.has_columnwise_data(), "Output columnwise data is not allocated");

  // Flatten tensor to 2D
  NVTE_CHECK(input.data.shape == output.data.shape,
             "Input and output shapes do not match (input=", input.data.shape,
             ", output=", output.data.shape, ")");
  const size_t row_length = input.flat_last_dim();
  const size_t num_rows = input.flat_first_dim();
  NVTE_CHECK(output.flat_first_dim() == num_rows && output.flat_last_dim() == row_length,
             "Invalid output dimensions (expected ", std::vector<size_t>{num_rows, row_length},
             ", got ", std::vector<size_t>{output.flat_first_dim(), output.flat_last_dim()}, ")");

  // Check that cast and transposed output data matches
  NVTE_CHECK(output.data.dtype == output.columnwise_data.dtype,
             "Cast and transposed output types must match.");
  NVTE_CHECK(output.scale_inv.dptr == output.columnwise_scale_inv.dptr,
             "Cast and transposed outputs need to share scale-inverse tensor.");

  TRANSFORMER_ENGINE_TYPE_SWITCH_INPUT(
      input.dtype(), InputType,
      TRANSFORMER_ENGINE_TYPE_SWITCH_OUTPUT(
          output.dtype(), OutputType,
          if (is_tensor_scaling(output.scaling_mode)) {
            // Delayed scaling and current scaling are two variants of per-tensor scaling

            constexpr const char *itype_name = TypeInfo<InputType>::name;
            constexpr const char *otype_name = TypeInfo<OutputType>::name;
            constexpr size_t itype_size = sizeof(InputType);
            constexpr size_t otype_size = sizeof(OutputType);

            // Choose between the runtime-compiled and the statically-compiled kernel
            const bool aligned =
                (row_length % THREADS_PER_WARP == 0 && num_rows % THREADS_PER_WARP == 0);
            if (aligned && rtc::is_enabled()) {  // Runtime-compiled tuned kernel
              // Pick kernel config
              std::vector<KernelConfig> kernel_configs;
              kernel_configs.reserve(16);
              const size_t sm_count = static_cast<size_t>(cuda::sm_count());
              auto add_config = [&](size_t load_size, size_t store_size) {
                kernel_configs.emplace_back(row_length, num_rows, itype_size, otype_size, load_size,
                                            store_size, sm_count);
              };
              add_config(8, 8);
              add_config(4, 8);
              add_config(8, 4);
              add_config(4, 4);
              add_config(2, 8);
              add_config(8, 2);
              add_config(2, 4);
              add_config(4, 2);
              add_config(2, 2);
              add_config(1, 8);
              add_config(8, 1);
              add_config(1, 4);
              add_config(4, 1);
              add_config(1, 2);
              add_config(2, 1);
              add_config(1, 1);
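              // Note: Configs are listed from most to least vectorized, and
              // std::min_element keeps the first of any equal-cost entries,
              // so ties resolve toward wider vector accesses.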
              const auto &kernel_config =
                  *std::min_element(kernel_configs.begin(), kernel_configs.end());
              NVTE_CHECK(kernel_config.valid, "invalid kernel config");
              const size_t load_size = kernel_config.load_size;
              const size_t store_size = kernel_config.store_size;
              const size_t num_blocks = kernel_config.num_blocks;

              // Compile NVRTC kernel if needed and launch
              auto &rtc_manager = rtc::KernelManager::instance();
              const std::string kernel_label =
                  concat_strings("cast_transpose,itype=", itype_name, ",otype=", otype_name,
                                 ",load_size=", load_size, ",store_size=", store_size);
              if (!rtc_manager.is_compiled(kernel_label)) {
                std::string code = string_code_transpose_rtc_cast_transpose_cu;
                code = regex_replace(code, "__ITYPE__", itype_name);
                code = regex_replace(code, "__OTYPE__", otype_name);
                code = regex_replace(code, "__LOAD_SIZE__", load_size);
                code = regex_replace(code, "__STORE_SIZE__", store_size);
                code = regex_replace(code, "__WARPS_PER_TILE__", warps_per_tile);
                code = regex_replace(code, "__BLOCK_SIZE__", block_size);
                rtc_manager.compile(kernel_label, "cast_transpose_optimized_kernel", code,
                                    "transformer_engine/common/transpose/rtc/cast_transpose.cu");
              }
              rtc_manager.launch(kernel_label, num_blocks, block_size, 0, stream,
                                 static_cast<const InputType *>(input.data.dptr),
                                 reinterpret_cast<const CType *>(noop.data.dptr),
                                 static_cast<OutputType *>(output.data.dptr),
                                 static_cast<OutputType *>(output.columnwise_data.dptr),
                                 static_cast<const CType *>(output.scale.dptr),
                                 static_cast<CType *>(output.amax.dptr),
                                 static_cast<CType *>(output.scale_inv.dptr), row_length, num_rows);
            } else {  // Statically-compiled general kernel
              constexpr size_t load_size = 4;
              constexpr size_t store_size = 4;
              constexpr size_t row_tile_size = load_size / itype_size * THREADS_PER_WARP;
              constexpr size_t col_tile_size = store_size / otype_size * THREADS_PER_WARP;
              const int num_blocks =
                  (DIVUP(row_length, row_tile_size) * DIVUP(num_rows, col_tile_size));
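
              // Note: Unlike the RTC path, the general kernel tolerates
              // dimensions that are not multiples of the tile size; edge
              // tiles are covered by the bounds checks inside the kernel at
              // the cost of some idle threads.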

              cast_transpose_general_kernel<load_size, store_size, InputType, OutputType>
                  <<<num_blocks, block_size, 0, stream>>>(
                      static_cast<const InputType *>(input.data.dptr),
                      reinterpret_cast<const CType *>(noop.data.dptr),
                      static_cast<OutputType *>(output.data.dptr),
                      static_cast<OutputType *>(output.columnwise_data.dptr),
                      static_cast<const CType *>(output.scale.dptr),
                      static_cast<CType *>(output.amax.dptr),
                      static_cast<CType *>(output.scale_inv.dptr), row_length, num_rows);
            }
          } else {
            NVTE_ERROR("Not implemented scaling mode: ", to_string(output.scaling_mode));
          });  // NOLINT(*)
  );           // NOLINT(*)
}

}  // namespace transformer_engine::detail

void nvte_cast_transpose(const NVTETensor input, NVTETensor output, cudaStream_t stream) {
  NVTE_API_CALL(nvte_cast_transpose);
  using namespace transformer_engine;
  auto noop = Tensor();
  transformer_engine::detail::cast_transpose(*convertNVTETensorCheck(input), noop,
                                             convertNVTETensor(output), stream);
}
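
// Usage sketch (hypothetical host code; constructing the NVTETensor handles
// is the caller's responsibility and depends on the Transformer Engine
// version in use):
//   NVTETensor input = ...;   // e.g. BF16 data of shape [num_rows, row_length]
//   NVTETensor output = ...;  // FP8 tensor with rowwise and columnwise buffers
//   nvte_cast_transpose(input, output, stream);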

void nvte_cast_transpose_with_noop(const NVTETensor input, const NVTETensor noop, NVTETensor output,
                                   cudaStream_t stream) {
  NVTE_API_CALL(nvte_cast_transpose_with_noop);
  using namespace transformer_engine;
  transformer_engine::detail::cast_transpose(*convertNVTETensorCheck(input),
                                             *convertNVTETensorCheck(noop),
                                             convertNVTETensor(output), stream);
}