/*!
 * \file tl/op/gemm.cc
 *
 * Define gemm operator.
 */

#include "gemm.h"

#include "builtin.h"
#include <tvm/tir/builtin.h>
#include <tvm/tir/op.h>
#include <tvm/tir/op_attr_types.h>
#include <tvm/tir/transform.h>

#include "../target/utils.h"

namespace tvm {
namespace tl {

using namespace tir;

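// Decompose x into its prime factors, returned in non-decreasing order.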
static std::vector<int> toPrimeFactors(int x) {
  int i = 2;
  std::vector<int> result;
  while (x > 1) {
    if (x % i == 0) {
      x /= i;
      result.push_back(i);
    } else {
      i++;
    }
  }
  return result;
}

Gemm::Gemm(Array<PrimExpr> args, BufferMap vmap) {
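  // Positional arguments: access pointers to A, B and C, then trans_A,
  // trans_B, M, N, K, the warp partition policy, clear_accum, and the
  // optional kPack (CDNA only) and wg_wait parameters.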
  Aptr = args[0];
  Bptr = args[1];
  Cptr = args[2];
  A = vmap[GetVarFromAccessPtr(Aptr)];
  B = vmap[GetVarFromAccessPtr(Bptr)];
  C = vmap[GetVarFromAccessPtr(Cptr)];
  trans_A = args[3].as<Bool>().value();
  trans_B = args[4].as<Bool>().value();
  M = args[5].as<IntImm>().value()->value;
  N = args[6].as<IntImm>().value()->value;
  K = args[7].as<IntImm>().value()->value;
  policy = static_cast<GemmWarpPolicy>(args[8].as<IntImm>().value()->value);
  clear_accum = args[9].as<Bool>().value();
  if (args.size() > 10) {
    kPack = args[10].as<IntImm>().value()->value;
    if (kPack != 1 && kPack != 2) {
      ICHECK(false) << "kPack must be 1 or 2";
    }
  }
  if (args.size() > 11) {
    wg_wait = args[11].as<IntImm>().value()->value;
  }
}

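// Split the available warps into an (m_warp, n_warp) grid according to the
// configured GemmWarpPolicy. On Hopper, when WGMMA may be used, warps are
// assigned in whole warp-groups (4 warps) along the M dimension.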
std::pair<int, int> Gemm::ComputeWarpPartition(int num_warps, Target target,
                                               bool maybe_hopper_wgmma) const {
  int m_warp = 1, n_warp = 1;
  constexpr int kMPerWarp = 16; // Rows processed by a single warp
  constexpr int kNPerWarp = 8;  // Columns processed by a single warp
  bool allow_wgmma = TargetIsHopper(target) && maybe_hopper_wgmma &&
                     (this->M >= 64) && (num_warps % 4 == 0);
  ICHECK(this->M % kMPerWarp == 0)
      << "M must be divisible by " << kMPerWarp << ", but got " << this->M;
  ICHECK(this->N % kNPerWarp == 0)
      << "N must be divisible by " << kNPerWarp << ", but got " << this->N;
  if (allow_wgmma) {
    ICHECK(num_warps % 4 == 0)
        << "Warp-group MMA requires the warp count to be a multiple of 4.";

    constexpr int kGroup = 4; // Number of warps in a warp-group

    m_warp = kGroup; // Initially, only one warp-group on M dimension
    n_warp = num_warps / m_warp; // Remaining warps go to the N dimension

    if (this->policy == GemmWarpPolicy::kFullRow) {
      // Try to put as many warp-groups as possible on M dimension
      // (decreasing multiples of 4, ensuring divisibility by M)
      for (int cand = num_warps; cand >= kGroup; cand -= kGroup) {
        if (this->M % (cand * kMPerWarp) == 0) {
          m_warp = cand;
          n_warp = num_warps / m_warp;
          break;
        }
      }
    } else if (this->policy == GemmWarpPolicy::kFullCol) {
      // Try to use warps on N dimension; if N is not divisible, split excess
      // groups to M
      int cand_n = n_warp;                       // Initially assume all on N
      if (this->N % (cand_n * kNPerWarp) != 0) { // N direction division fails
        int max_n = this->N / kNPerWarp;
        // Find a feasible n_warp from max possible downwards, ensuring
        // num_warps/n_warp is multiple of 4
        for (int n = std::min(cand_n, max_n); n >= 1; --n) {
          if (num_warps % n == 0 && (num_warps / n) % kGroup == 0) {
            n_warp = n;
            m_warp = num_warps / n_warp;
            break;
          }
        }
      }
    } else if (this->policy == GemmWarpPolicy::kSquare) {
      // Exhaustive search, but m must be a multiple of 4 (whole warp-groups)
      int max_m = this->M / kMPerWarp;
      int max_n = this->N / kNPerWarp;

      float ideal = this->N > 0 ? static_cast<float>(this->M) / this->N : 1.f;

      float best_score = std::numeric_limits<float>::max();
      int best_m = kGroup, best_n = n_warp;

      for (int m = kGroup; m <= num_warps && m <= max_m; m += kGroup) {
        if (num_warps % m)
          continue;
        int n = num_warps / m;
        if (n > max_n)
          continue;

        float m_per_warp = static_cast<float>(this->M) / (m * kMPerWarp);
        float n_per_warp = static_cast<float>(this->N) / (n * kNPerWarp);
        float score = std::abs(m_per_warp / n_per_warp - ideal);

        if (score < best_score) {
          best_score = score;
          best_m = m;
          best_n = n;
        }
      }
      m_warp = best_m;
      n_warp = best_n;
    } else {
      ICHECK(0) << "Unknown GemmWarpPolicy";
    }

    ICHECK(m_warp * n_warp == num_warps)
        << "m_warp * n_warp must equal num_warps";
    return {m_warp, n_warp};
  }

  if (this->policy == GemmWarpPolicy::kFullRow) {
    // Try to partition M first
    m_warp = num_warps;
    n_warp = 1;

    // If M cannot be evenly divided by m_warp*16, try to split remaining warps
    // to N
    if (this->M % (m_warp * kMPerWarp) != 0) {
      // Calculate how many warps we can use for M
      int max_m_warps = this->M / kMPerWarp;
      m_warp = max_m_warps;
      // Use remaining warps for N
      n_warp = num_warps / m_warp;
      if (n_warp == 0)
        n_warp = 1;
    }
  } else if (this->policy == GemmWarpPolicy::kFullCol) {
    // Try to partition N first
    m_warp = 1;
    n_warp = num_warps;

    // If N cannot be evenly divided by n_warp*8, try to split remaining warps
    // to M
    if (this->N % (n_warp * kNPerWarp) != 0) {
      // Calculate how many warps we can use for N
      int max_n_warps = this->N / kNPerWarp;
      n_warp = max_n_warps;
      // Use remaining warps for M
      m_warp = num_warps / n_warp;
      if (m_warp == 0)
        m_warp = 1;
    }
  } else if (this->policy == GemmWarpPolicy::kSquare) {
    // First calculate the maximum possible warps for each dimension
    int max_m_warps =
        this->M / kMPerWarp; // Each warp needs at least 16 elements in M
    int max_n_warps =
        this->N / kNPerWarp; // Each warp needs at least 8 elements in N

    // Calculate the ideal ratio of M/N warps based on the matrix dimensions
    float ideal_ratio = 1.0f;
    if (this->N > 0) {
      ideal_ratio = static_cast<float>(this->M) / this->N;
    }

    // Start with a balanced initial guess
    m_warp = 1;
    n_warp = 1;

    // Try to find the best balanced partition
    int best_m = 1;
    int best_n = 1;
    float best_balance = std::numeric_limits<float>::max();

    // Try all possible combinations that satisfy the constraints
    for (int m = 1; m <= max_m_warps && m <= num_warps; m++) {
      int n = num_warps / m;

      // Calculate how balanced this partition is
      float m_per_warp = static_cast<float>(this->M) / (m * kMPerWarp);
      float n_per_warp = static_cast<float>(this->N) / (n * kNPerWarp);
      float balance = std::abs(m_per_warp / n_per_warp - ideal_ratio);

      if (balance < best_balance) {
        best_balance = balance;
        best_m = m;
        best_n = n;
      }
    }

    m_warp = best_m;
    n_warp = best_n;
  } else {
    ICHECK(0) << "Unknown GemmWarpPolicy";
  }
  return {m_warp, n_warp};
}

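// Return true if the current A/B/C dtype combination, transpose settings and
// K alignment are accepted by the Hopper WGMMA path.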
bool Gemm::CheckWGMMA() const {
  if (C->dtype == DataType::Float(16)) {
    if (A->dtype == DataType::Float(16) && B->dtype == DataType::Float(16))
      return K % 16 == 0;
    else if (A->dtype.is_float8_e4m3() && B->dtype.is_float8_e4m3())
      return (!trans_A) && trans_B && K % 32 == 0;
    else if (A->dtype.is_float8_e4m3() && B->dtype.is_float8_e5m2())
      return (!trans_A) && trans_B && K % 32 == 0;
    else if (A->dtype.is_float8_e5m2() && B->dtype.is_float8_e4m3())
      return (!trans_A) && trans_B && K % 32 == 0;
    else if (A->dtype.is_float8_e5m2() && B->dtype.is_float8_e5m2())
      return (!trans_A) && trans_B && K % 32 == 0;
    else
      return false;
  } else if (C->dtype == DataType::Float(32)) {
    if (A->dtype == DataType::Float(16) && B->dtype == DataType::Float(16))
      return K % 16 == 0;
    else if (A->dtype == DataType::BFloat(16) &&
             B->dtype == DataType::BFloat(16))
      return K % 16 == 0;
    else if (A->dtype == DataType::Float(32) && B->dtype == DataType::Float(32))
      return (!trans_A) && trans_B && K % 8 == 0;
    else if (A->dtype.is_float8_e4m3() && B->dtype.is_float8_e4m3())
      return (!trans_A) && trans_B && K % 32 == 0;
    else if (A->dtype.is_float8_e4m3() && B->dtype.is_float8_e5m2())
      return (!trans_A) && trans_B && K % 32 == 0;
    else if (A->dtype.is_float8_e5m2() && B->dtype.is_float8_e4m3())
      return (!trans_A) && trans_B && K % 32 == 0;
    else if (A->dtype.is_float8_e5m2() && B->dtype.is_float8_e5m2())
      return (!trans_A) && trans_B && K % 32 == 0;
    else
      return false;
  } else if (C->dtype == DataType::Int(32)) {
    if (A->dtype == DataType::Int(8) && B->dtype == DataType::Int(8))
      return (!trans_A) && trans_B && K % 32 == 0;
    else if (A->dtype == DataType::Int(8) && B->dtype == DataType::UInt(8))
      return (!trans_A) && trans_B && K % 32 == 0;
    else if (A->dtype == DataType::UInt(8) && B->dtype == DataType::Int(8))
      return (!trans_A) && trans_B && K % 32 == 0;
    else if (A->dtype == DataType::UInt(8) && B->dtype == DataType::UInt(8))
      return (!trans_A) && trans_B && K % 32 == 0;
    else
      return false;
  } else {
    return false;
  }
}

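// Lower the GEMM to a call_extern into the tl device template library; the
// template string encodes the tile shape, warp partition, transposes and
// target-specific flags.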
Stmt Gemm::Lower(const LowerArgs &T, arith::Analyzer *analyzer) const {
  int warp_size = 32;
  if (TargetIsCDNA(T.target)) {
    warp_size = 64;
  }
  auto block_size = *as_const_int(T.thread_bounds->extent);
  bool maybe_wgmma = TargetIsHopper(T.target) && (this->M >= 64) &&
                     (block_size / warp_size % 4 == 0) && CheckWGMMA();

  auto [warp_m, warp_n] =
      ComputeWarpPartition(block_size / warp_size, T.target, maybe_wgmma);

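  // Select the device-side template from the operand scopes: tl::gemm_ss when
  // both A and B are in shared memory, tl::gemm_rs when A is a local fragment,
  // and tl::gemm_sr when B is; then append the template arguments.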
  std::stringstream ss;
  std::string op_name = "tl::gemm_ss";
  if (A.scope() == "local.fragment") {
    ICHECK(B.scope() != "local.fragment");
    op_name = "tl::gemm_rs";
  } else if (B.scope() == "local.fragment") {
    op_name = "tl::gemm_sr";
  }
  ss << op_name << "<" << M << ", " << N << ", " << K << ", ";
  ss << warp_m << ", " << warp_n << ", ";
  ss << trans_A << ", " << trans_B;
  ss << ", " << clear_accum;
  if (TargetIsCDNA(T.target)) {
    // For CDNA gemm, we also need to pass the kPack factor
    ss << ", " << kPack;
  } else if (TargetIsHopper(T.target)) {
    ss << ", " << (maybe_wgmma ? "true" : "false");
  }
  if (wg_wait != 0) {
    ss << ", " << wg_wait;
  }
  ss << ">";
  auto A_buffer = T.buffer_remap.count(A) ? T.buffer_remap[A] : A;
  auto B_buffer = T.buffer_remap.count(B) ? T.buffer_remap[B] : B;
  auto C_buffer = T.buffer_remap[C];

  Array<PrimExpr> new_args;
  new_args.push_back(StringImm(ss.str()));
  new_args.push_back(Aptr);
  new_args.push_back(Bptr);
  new_args.push_back(Cptr);
  auto new_call = Call(DataType::Handle(), builtin::call_extern(), new_args);
  return Evaluate(new_call);
}

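// Infer shared-memory and fragment layouts for A, B and C based on the target
// architecture and warp partition. Runs only once; completed_ guards reentry.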
LayoutMap Gemm::InferLayout(const LayoutInferArgs &T, InferLevel level) {
  if (completed_)
    return {};
  LayoutMap results;
  ICHECK(C.scope() == "local.fragment");
  auto thread_range = T.thread_bounds;
  auto block_size = *as_const_int(thread_range->extent);
  if (TargetIsVolta(T.target)) {
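    // Volta MMA path: C is a Volta accumulator fragment; shared A/B use the
    // Volta AB layout, and a fragment A is only supported without transpose.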
    const int warp_size = 32;
    auto [warp_m, warp_n] =
        ComputeWarpPartition(block_size / warp_size, T.target);
    auto fragment =
        makeGemmVoltaFragmentC(M, N, M / warp_m, N / warp_n, C->dtype.bits());
    results.Set(C, fragment->BindThreadRange(thread_range));
    if (A.scope() == "shared" || A.scope() == "shared.dyn") {
      int dim_A = A->shape.size();
      results.Set(A, makeGemmVoltaABLayout(*as_const_int(A->shape[dim_A - 2]),
                                           *as_const_int(A->shape[dim_A - 1]),
                                           true, trans_A ? 1 : 2));
    } else if (A.scope() == "local.fragment") {
      ICHECK(trans_A == false);
      auto fragment = makeGemmVoltaFragmentA(M, N, K, M / warp_m, N / warp_n);
      results.Set(A, fragment->BindThreadRange(thread_range));
    } else {
      ICHECK(0);
    }

    ICHECK(B.scope() == "shared" || B.scope() == "shared.dyn");
    int dim_B = B->shape.size();
    results.Set(B, makeGemmVoltaABLayout(*as_const_int(B->shape[dim_B - 2]),
                                         *as_const_int(B->shape[dim_B - 1]),
                                         false, trans_B ? 2 : 1));
  } else if (TargetIsAmpere(T.target) || TargetIsTuring(T.target)) {
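    // Turing/Ampere MMA path: C is an accumulator fragment; A and B may live
    // in shared memory (swizzled AB layout) or already in local fragments.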
    const int warp_size = 32;
    auto [warp_m, warp_n] =
        ComputeWarpPartition(block_size / warp_size, T.target);
    auto fragment =
        makeGemmFragmentC(M, N, M / warp_m, N / warp_n, C->dtype.bits());
    results.Set(C, fragment->BindThreadRange(thread_range));

    if (A.scope() == "shared" || A.scope() == "shared.dyn") {
      int dim_A = A->shape.size();
      const int64_t mat_stride = *as_const_int(A->shape[dim_A - 2]);
      const int64_t mat_continuous = *as_const_int(A->shape[dim_A - 1]);
      results.Set(A,
                  makeGemmABLayout(mat_stride, mat_continuous, mat_continuous,
                                   A->dtype.bits(), trans_A ? 1 : 2));
    } else if (A.scope() == "local.fragment") {
      auto fragment = makeGemmFragmentA(M, N, K, M / warp_m, N / warp_n,
                                        A->dtype.bits(), trans_A);
      results.Set(A, fragment->BindThreadRange(thread_range));
    } else {
      ICHECK(0);
    }
    if (B.scope() == "shared" || B.scope() == "shared.dyn") {
      int dim_B = B->shape.size();
      const int64_t mat_stride = *as_const_int(B->shape[dim_B - 2]);
      const int64_t mat_continuous = *as_const_int(B->shape[dim_B - 1]);
      results.Set(B,
                  makeGemmABLayout(mat_stride, mat_continuous, mat_continuous,
                                   B->dtype.bits(), trans_B ? 2 : 1));
    } else if (B.scope() == "local.fragment") {
      auto fragment =
          makeGemmFragmentB(M, N, K, M / warp_m, N / warp_n, trans_B);
      results.Set(B, fragment->BindThreadRange(thread_range));
    } else {
      ICHECK(0);
    }
  } else if (TargetIsHopper(T.target)) {
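    // Hopper path: prefer WGMMA layouts when the shape, thread count and
    // dtypes allow it (see CheckWGMMA), otherwise fall back to MMA layouts.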
    const int warp_size = 32;
    bool maybe_wgmma =
        (this->M >= 64) && (block_size / warp_size % 4 == 0) && CheckWGMMA();
    auto [warp_m, warp_n] =
        ComputeWarpPartition(block_size / warp_size, T.target, maybe_wgmma);
    auto fragment =
        maybe_wgmma
            ? makeGemmFragmentCHopper(M, N, M / warp_m, N / warp_n,
                                      C->dtype.bits())
            : makeGemmFragmentC(M, N, M / warp_m, N / warp_n, C->dtype.bits());
    results.Set(C, fragment->BindThreadRange(thread_range));
    if (A.scope() == "shared" || A.scope() == "shared.dyn") {
      int dim_A = A->shape.size();
      const int64_t mat_stride = *as_const_int(A->shape[dim_A - 2]);
      const int64_t mat_continuous = *as_const_int(A->shape[dim_A - 1]);
      const int64_t continuity =
          trans_A ? 4 * mat_continuous / warp_m : mat_continuous;
      auto ABLayout =
          maybe_wgmma
              ? makeGemmABLayoutHopper(mat_stride, mat_continuous, continuity,
                                       A->dtype.bits(), trans_A ? 1 : 2)
              : makeGemmABLayout(mat_stride, mat_continuous, mat_continuous,
                                 A->dtype.bits(), trans_A ? 1 : 2);
      results.Set(A, ABLayout);
    } else {
      auto fragment = makeGemmFragmentA(M, N, K, M / warp_m, N / warp_n,
                                        A->dtype.bits(), trans_A);
      results.Set(A, fragment->BindThreadRange(thread_range));
    }
    if (B.scope() == "shared" || B.scope() == "shared.dyn") {
      int dim_B = B->shape.size();
      const int64_t mat_stride = *as_const_int(B->shape[dim_B - 2]);
      const int64_t mat_continuous = *as_const_int(B->shape[dim_B - 1]);
      const int64_t continuity =
          trans_B ? mat_continuous : mat_continuous / warp_n;
      auto ABLayout =
          maybe_wgmma
              ? makeGemmABLayoutHopper(mat_stride, mat_continuous, continuity,
                                       B->dtype.bits(), trans_B ? 2 : 1)
              : makeGemmABLayout(mat_stride, mat_continuous, mat_continuous,
                                 B->dtype.bits(), trans_B ? 2 : 1);
      results.Set(B, ABLayout);
    } else {
      ICHECK(0) << "WGMMA only support B in shared.";
    }
  } else if (TargetIsCDNA(T.target)) {
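    // CDNA MFMA path: 64-thread wavefronts; shared A/B use the CDNA layout,
    // which is parameterized by kPack.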
    const int warp_size = 64;
    auto [warp_m, warp_n] =
        ComputeWarpPartition(block_size / warp_size, T.target);

    auto fragment =
        makeGemmFragmentCCDNA(M, N, M / warp_m, N / warp_n, C->dtype.bits());
    results.Set(C, fragment->BindThreadRange(thread_range));

    if (A.scope() == "shared" || A.scope() == "shared.dyn") {
      int dim_A = A->shape.size();
      auto shared_layout = makeGemmABLayoutCDNA(
          *as_const_int(A->shape[dim_A - 2]),
          *as_const_int(A->shape[dim_A - 1]), A->dtype.bits(), kPack);
      results.Set(A, shared_layout);
    } else if (A.scope() == "local.fragment") {
      auto fragment = makeGemmFragmentACDNA(M, N, K, M / warp_m, N / warp_n,
                                            A->dtype.bits(), trans_A);
      results.Set(A, fragment->BindThreadRange(thread_range));
    } else {
      ICHECK(0);
    }
    if (B.scope() == "shared" || B.scope() == "shared.dyn") {
      int dim_B = B->shape.size();
      auto shared_layout = makeGemmABLayoutCDNA(
          *as_const_int(B->shape[dim_B - 2]),
          *as_const_int(B->shape[dim_B - 1]), B->dtype.bits(), kPack);

      results.Set(B, shared_layout);
    } else if (B.scope() == "local.fragment") {
      auto fragment =
          makeGemmFragmentB(M, N, K, M / warp_m, N / warp_n, trans_B);
      results.Set(B, fragment->BindThreadRange(thread_range));
    } else {
      ICHECK(0);
    }
  } else {
    ICHECK(0) << "Not supported " << T.target->str();
  }
  completed_ = true;
  return results;
}

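// Register the Gemm op; calls are treated as opaque for TIR effect analysis.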
TIR_REGISTER_TL_OP(Gemm, gemm)
    .set_num_inputs(5)
    .set_attr<TCallEffectKind>("TCallEffectKind",
                               Integer(CallEffectKind::kOpaque));

} // namespace tl
} // namespace tvm