/*!
 * \file lower_tile_op.cc
 * \brief Lower the tile op for further codegen.
 */

#include <tvm/ffi/reflection/registry.h>
#include <tvm/tir/builtin.h>
#include <tvm/tir/op.h>
#include <tvm/tir/stmt_functor.h>
#include <tvm/tir/transform.h>
#include <tvm/tir/utils.h>
#include <unordered_map>

#include "../layout/layout.h"
#include "../layout/utils.h"
#include "../op/builtin.h"
#include "../op/gemm.h"
#include "../op/gemm_sp.h"
#include "../op/operator.h"

#include "arith/ir_mutator_with_analyzer.h"
#include "loop_partition.h"

namespace tvm {
namespace tl {

using namespace tir;

// makeBufferWithLayout creates a new Buffer based on the given buffer and
// layout. It remaps the buffer's data var, adjusts the storage scope if
// needed (e.g., from "local.fragment" to "local"), and computes the output
// shape from the layout. For shared memory buffers, it also prepends a
// replication axis when the buffer's extent exceeds the layout's extent.
static Buffer makeBufferWithLayout(const Buffer &buffer, const Layout &layout,
                                   Map<Var, Var> &var_remap) {
  const auto *ptr_type =
      TVM_TYPE_AS(buffer->data->type_annotation, PointerTypeNode);
  Type new_type;
  // convert fragments to normal local buffer
  if (ptr_type->storage_scope == "local.fragment") {
    new_type = PointerType(ptr_type->element_type, "local");
  } else {
    new_type = buffer->data->type_annotation;
  }
  Var new_var;
  if (ptr_type->storage_scope == "global") {
    new_var = buffer->data;
  } else {
    if (var_remap.count(buffer->data)) {
      new_var = var_remap[buffer->data];
    } else {
      new_var = Var(buffer->data->name_hint, new_type);
      var_remap.Set(buffer->data, new_var);
    }
  }
  Array<PrimExpr> layout_shape = layout->OutputShape();
  Array<PrimExpr> output_shape = layout_shape;

  if (ptr_type->storage_scope == "shared" ||
      ptr_type->storage_scope == "shared.dyn") {
    int replicate_extent = 1;
    Array<PrimExpr> buffer_shape = buffer->shape;
    int buffer_extent = 1;
    int layout_extent = 1;
    for (size_t i = 0; i < buffer_shape.size(); i++) {
      const auto *shape = buffer_shape[i].as<IntImmNode>();
      ICHECK(shape) << "Buffer \"" << buffer->name
                    << "\" must have a static shape, but got "
                    << buffer_shape[i];
      buffer_extent *= shape->value;
    }
    for (size_t i = 0; i < layout_shape.size(); i++) {
      const auto *shape = layout_shape[i].as<IntImmNode>();
      ICHECK(shape) << "Layout output shape must be static, but got "
                    << layout_shape[i];
      layout_extent *= shape->value;
    }
    replicate_extent = buffer_extent / layout_extent;
    if (replicate_extent > 1) {
      output_shape.insert(output_shape.begin(), replicate_extent);
    }
  }
  return Buffer(new_var, buffer->dtype, output_shape, {}, buffer->elem_offset,
                buffer->name, buffer->data_alignment, buffer->offset_factor,
                buffer->buffer_type);
}
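
// A hedged worked example of the replication logic above: assume a shared
// buffer of shape [2, 64, 64] (buffer_extent = 8192) whose layout reports
// OutputShape() == [64, 64] (layout_extent = 4096). Then
//   replicate_extent = 8192 / 4096 = 2,
// and the resulting buffer shape is [2, 64, 64], where the prepended axis
// encodes replication rather than a logical tensor dimension.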

// Rewrites the kLayoutMap block annotation so that layouts are keyed by the
// buffers produced during lowering (collected in layout_remap_) rather than
// by the original pre-remap buffers.
class LayoutRemapRewriter : public arith::IRMutatorWithAnalyzer {
public:
  static Stmt Substitute(Stmt stmt, Map<Buffer, Layout> layout_remap) {
    arith::Analyzer analyzer;
    LayoutRemapRewriter substituter(&analyzer);
    substituter.layout_remap_ = std::move(layout_remap);
    return substituter.VisitStmt(stmt);
  }

private:
  using arith::IRMutatorWithAnalyzer::IRMutatorWithAnalyzer;

  Stmt VisitStmt_(const BlockNode *op) final {
    auto block = Downcast<Block>(arith::IRMutatorWithAnalyzer::VisitStmt_(op));
    if (op->annotations.count(attr::kLayoutMap)) {
      block.CopyOnWrite()->annotations.Set(attr::kLayoutMap, layout_remap_);
    }
    return block;
  }

  Map<Buffer, Layout> layout_remap_;
};
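
// Illustrative sketch (buffer names hypothetical): if a block was annotated
// with kLayoutMap = {A_shared: swizzled_layout} and lowering remapped
// A_shared to a new buffer A_shared_reindex, the annotation is replaced by
// the collected layout_remap_, i.e. {A_shared_reindex: swizzled_layout}, so
// later passes see layouts keyed by the post-remap buffers.
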
class BufferGemmCollector : public StmtExprVisitor {
public:
  BufferGemmCollector() { Clear(); }

  void Clear() { buffer_var_gemm_.clear(); }

  void Collect(const Stmt &stmt) { VisitStmt(stmt); }

  Array<Var> GetBufferVarGemm() { return buffer_var_gemm_; }

private:
  void VisitStmt_(const EvaluateNode *op) {
    const auto *call_node = op->value.as<CallNode>();
    if (call_node == nullptr) {
      return;
    }
    auto call = GetRef<Call>(call_node);
    // Gemm and GemmSP share the same argument convention: args[0..2] are
    // tvm_access_ptr calls for srcA, srcB and dst respectively.
    if (call->op.same_as(Gemm::Get()) || call->op.same_as(GemmSP::Get())) {
      for (int i = 0; i < 3; ++i) {
        auto access_ptr = Downcast<Call>(call->args[i]);
        ICHECK(access_ptr->op.same_as(builtin::tvm_access_ptr()));
        buffer_var_gemm_.push_back(Downcast<Var>(access_ptr->args[1]));
      }
    }
  }

  Array<Var> buffer_var_gemm_;
};
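
// Example of what the collector records, assuming a TIR body containing
//   T.evaluate(tl.gemm(T.tvm_access_ptr(..., A_shared.data, ...),
//                      T.tvm_access_ptr(..., B_shared.data, ...),
//                      T.tvm_access_ptr(..., C_local.data, ...)))
// GetBufferVarGemm() would return [A_shared.data, B_shared.data,
// C_local.data]; buffer vars absent from this list are the candidates for
// 1-D TMA without swizzling (see its use in LowerTileOpPass below).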

/*!
 * \brief A class that rewrites buffer references in a statement based on a
 * given buffer remapping.
 *
 * This class is used to update buffer references in a statement after buffer
 * transformations have been applied. It specifically handles the remapping of
 * padding annotations.
 */
class RemapBufferRewriter : public arith::IRMutatorWithAnalyzer {
public:
  /*!
   * \brief Substitute buffer references in a statement based on a given buffer
   * remapping. \param stmt The statement to rewrite. \param buffer_remap A map
   * from old buffers to new buffers. \return The rewritten statement.
   */
  static Stmt Substitute(const Stmt &stmt, Map<Buffer, Buffer> buffer_remap) {
    arith::Analyzer analyzer;
    RemapBufferRewriter substituter(&analyzer);
    substituter.buffer_remap_ = std::move(buffer_remap);
    return substituter.VisitStmt(stmt);
  }

private:
  using arith::IRMutatorWithAnalyzer::IRMutatorWithAnalyzer;

  Stmt VisitStmt_(const BlockNode *op) final {
    if (op->annotations.count(attr::kPaddingMap)) {
      return RewritePaddingMap(op);
    }
    return IRMutatorWithAnalyzer::VisitStmt_(op);
  }

  /*!
   * \brief Rewrite the padding map annotation of a block.
   * \param op The block node to rewrite.
   * \return The rewritten block.
   */
  Stmt RewritePaddingMap(const BlockNode *op) {
    auto padding_map = op->annotations.Get(attr::kPaddingMap);
    if (!padding_map) {
      LOG(FATAL) << "Padding map annotation is missing";
    }

    Map<Var, Var> var_remap = CreateVarRemap();
    Map<Var, PrimExpr> new_padding_map = RemapPaddingMap(
        Downcast<Map<Var, PrimExpr>>(padding_map.value()), var_remap);

    auto block = Downcast<Block>(IRMutatorWithAnalyzer::VisitStmt_(op));
    auto block_ptr = block.CopyOnWrite();
    block_ptr->annotations.Set(attr::kPaddingMap, new_padding_map);
    return block;
  }

  /*!
   * \brief Create a mapping from old variables to new variables based on buffer
   * remapping. \return A map from old variables to new variables.
   */
  Map<Var, Var> CreateVarRemap() const {
    Map<Var, Var> var_remap;
    for (const auto &[buffer, buffer_remap] : buffer_remap_) {
      var_remap.Set(buffer->data, buffer_remap->data);
    }
    return var_remap;
  }

  /*!
   * \brief Remap the padding map using the variable remapping.
   * \param padding_map The original padding map.
   * \param var_remap The variable remapping.
   * \return The remapped padding map.
   */
  Map<Var, PrimExpr> RemapPaddingMap(const Map<Var, PrimExpr> &padding_map,
                                     const Map<Var, Var> &var_remap) const {
    Map<Var, PrimExpr> new_padding_map;
    for (const auto &[var, padding] : padding_map) {
      if (var_remap.count(var)) {
        new_padding_map.Set(var_remap.at(var), padding);
      } else {
        new_padding_map.Set(var, padding);
      }
    }
    return new_padding_map;
  }

  Map<Buffer, Buffer> buffer_remap_;
};
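
// Hedged sketch of the padding-map rewrite (names hypothetical): suppose
// buffer_remap_ maps a fragment buffer A (data var A.data) to A_local (data
// var A_local.data) and the block carries kPaddingMap = {A.data: pad}.
// CreateVarRemap yields {A.data: A_local.data}, so the annotation is
// rewritten to kPaddingMap = {A_local.data: pad}.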

class LowerTileOpPass : arith::IRMutatorWithAnalyzer {
public:
  static PrimFunc Substitute(PrimFunc f) {
    arith::Analyzer analyzer;
    LowerTileOpPass substituter(&analyzer);
    // Trace the buffer map for tvm_access_ptr
    substituter.buffer_map_.insert(f->buffer_map.begin(), f->buffer_map.end());
    for (const auto &[_, buffer] : f->buffer_map) {
      substituter.buffer_data_to_buffer_.Set(buffer->data, buffer);
    }
    auto target = f->GetAttr<Target>(tvm::attr::kTarget);
    ICHECK(target.defined()) << "LowerTileOpPass: Require the target attribute";
    substituter.target_ = target.value();
    // For 1-D TMA we need to know which buffers participate in a GEMM;
    // buffers outside this set do not require swizzling.
    BufferGemmCollector collector;
    collector.Collect(f->body);
    substituter.buffer_var_gemm_ = collector.GetBufferVarGemm();
    PrimFuncNode *fptr = f.CopyOnWrite();
    fptr->body = substituter.VisitStmt(f->body);
    fptr->body =
        RemapBufferRewriter::Substitute(fptr->body, substituter.buffer_remap_);
    fptr->body =
        LayoutRemapRewriter::Substitute(fptr->body, substituter.layout_remap_);
    tvm::transform::PassContext ctxt = tvm::transform::PassContext::Current();
    Optional<Bool> opt_disable_tma_lower =
        ctxt->GetConfig(kDisableTMALower, Optional<Bool>());

    if (!opt_disable_tma_lower.value_or(Bool(false))) {
      // Workaround (@lei): if TMA lowering is left enabled, cp.async
      // lowering will not be generated, so disable TMA lowering whenever no
      // TMA intrinsic was actually encountered.
      ctxt->config.Set(kDisableTMALower, Bool(!substituter.has_tma_));
    }
    return f;
  }

private:
  using arith::IRMutatorWithAnalyzer::IRMutatorWithAnalyzer;

  Stmt VisitStmt_(const BlockNode *op) final {
    // Record the mapping from buffer data var to buffer for later lookup
    for (auto buffer : op->alloc_buffers) {
      buffer_map_.insert({buffer->data, buffer});
      buffer_data_to_buffer_.Set(buffer->data, buffer);
    }
    for (auto match_buffer : op->match_buffers) {
      buffer_map_.insert({match_buffer->buffer->data, match_buffer->buffer});
    }
    Map<Var, Layout> vmap;
    if (op->annotations.count(attr::kLayoutMap)) {
      auto layout_map = op->annotations.at(attr::kLayoutMap)
                            .as<Map<Buffer, Layout>>()
                            .value();
      for (auto [buffer, layout] : layout_map) {
        buffer_remap_.Set(buffer,
                          makeBufferWithLayout(buffer, layout, var_remap_));
        layout_map_.Set(buffer, layout);
      }
    }
    auto block = Downcast<Block>(arith::IRMutatorWithAnalyzer::VisitStmt_(op));
    auto block_ptr = block.CopyOnWrite();
    for (size_t i = 0; i < block->alloc_buffers.size(); i++) {
      auto buffer = block->alloc_buffers[i];
      if (buffer_remap_.count(buffer)) {
        block_ptr->alloc_buffers.Set(i, buffer_remap_[buffer]);
      }
    }
    for (const auto &buffer : workspaces_)
      block_ptr->alloc_buffers.push_back(buffer);
    workspaces_.clear();
    return block;
  }

  int CheckAndGetBufferRowSize(const Buffer &buffer) {
    CHECK(buffer->shape.size() >= 2)
        << "Buffer \"" << buffer->name << "\" with shape " << buffer->shape
        << " must have at least 2 dimensions";

    auto dim = buffer->shape.size();
    const auto *row_size = buffer->shape[dim - 1].as<IntImmNode>();
    ICHECK(row_size) << "The last dimension of Buffer \"" << buffer->name
                     << "\" must be a constant integer, but got "
                     << buffer->shape[dim - 1];
    return static_cast<int>(row_size->value);
  }

  struct AccessPtrResult {
    PrimExpr expr;
    bool rewritten{false};
  };

  AccessPtrResult
  HandleAccessPtrAndOffset(const PrimExpr &access_ptr,
                           const Optional<PrimExpr> &offset = std::nullopt,
                           DataType dtype = DataType::Int(32)) {
    AccessPtrResult result{access_ptr, false};
    // The third argument (index 2) of a T.tvm_access_ptr call is the offset;
    // we reset it to 0 and accumulate it into smem_offset instead.
    CHECK(access_ptr->IsInstance<CallNode>())
        << "Invalid access ptr for permuted layout: " << access_ptr;
    auto access_ptr_call = Downcast<Call>(access_ptr);
    if (access_ptr_call->op.same_as(builtin::tvm_access_ptr())) {
      LOG(FATAL) << "Transformation for tvm_access_ptr is not implemented yet";
    } else if (access_ptr_call->op.same_as(builtin::address_of())) {
      Optional<PrimExpr> resolved = ResolveBufferLoad(access_ptr_call->args[0]);
      ICHECK(resolved.defined())
          << "Invalid access op for permuted layout: " << access_ptr;
      PrimExpr load_expr = resolved.value();
      if (!load_expr.same_as(access_ptr_call->args[0])) {
        // Rebuild the call with the resolved BufferLoad argument.
        access_ptr_call = Call(access_ptr_call->dtype, access_ptr_call->op,
                               {load_expr}, access_ptr_call->span);
      }
      BufferLoad load = Downcast<BufferLoad>(access_ptr_call->args[0]);
      Array<PrimExpr> indices = load->indices;
      Array<PrimExpr> old_shape = load->buffer->shape;

      CHECK_EQ(indices.size(), old_shape.size())
          << "Indices size and shape size must match for an N-dimensional "
             "buffer, but got indices size: "
          << indices.size() << " and shape size: " << old_shape.size();

      PrimExpr elem_offset = 0;
      PrimExpr stride = 1;

      for (int i = static_cast<int>(old_shape.size()) - 1; i >= 0; --i) {
        elem_offset += indices[i] * stride;
        stride *= old_shape[i];
      }

      PrimExpr smem_offset =
          elem_offset + (offset.defined() ? offset.value() : 0);

      Buffer remap_key = FindRemapBuffer(load->buffer).value_or(load->buffer);
      Optional<Layout> layout = FindLayout(remap_key);
      if (!layout.defined() || !buffer_map_.count(remap_key->data)) {
        return result;
      }
      auto new_buffer = buffer_remap_.count(remap_key)
                            ? buffer_remap_[remap_key]
                            : load->buffer;
      auto new_shape = new_buffer->shape;

      auto buffer_map_iter = buffer_map_.find(Downcast<Var>(remap_key->data));

      // Invoked only for its shape validation; the row size itself is unused
      // here.
      (void)CheckAndGetBufferRowSize(buffer_map_iter->second);

      // Convert the flat offset into multi-dimensional indices, apply the
      // layout's forward mapping, then flatten it back against the remapped
      // buffer's shape.
      Array<PrimExpr> multi_dim_indices;
      PrimExpr remaining_offset = smem_offset;

      for (int i = static_cast<int>(old_shape.size()) - 1; i >= 0; --i) {
        multi_dim_indices.insert(multi_dim_indices.begin(),
                                 floormod(remaining_offset, old_shape[i]));
        remaining_offset = floordiv(remaining_offset, old_shape[i]);
      }

      auto forward_indices = layout.value()->Forward(multi_dim_indices);
      PrimExpr new_offset = 0;
      PrimExpr stride_offset = 1;
      for (int i = static_cast<int>(new_shape.size()) - 1; i >= 0; --i) {
        new_offset += forward_indices[i] * stride_offset;
        stride_offset *= new_shape[i];
      }
      new_offset = analyzer_->Simplify(new_offset);

      Array<PrimExpr> new_indices;
      for (int i = static_cast<int>(new_shape.size()) - 1; i >= 0; --i) {
        new_indices.insert(new_indices.begin(),
                           floormod(new_offset, new_shape[i]));
        new_offset = floordiv(new_offset, new_shape[i]);
      }

      Array<PrimExpr> new_args = {BufferLoad(new_buffer, new_indices)};
      if (buffer_remap_.count(remap_key)) {
        layout_remap_.Set(new_buffer, layout.value());
      }
      result.rewritten = true;
      result.expr = Call(access_ptr_call->dtype, access_ptr_call->op, new_args,
                         access_ptr_call->span);
      return result;
    } else {
      LOG(FATAL) << "Invalid access op for permuted layout: " << access_ptr;
    }

    return result;
  }
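
  // A worked example of the offset arithmetic in HandleAccessPtrAndOffset
  // above, with assumed numbers: for a 2-D buffer of shape [64, 64],
  // address_of(buf[2, 0]) with an extra offset of 2 flattens to
  // smem_offset = 2 * 64 + 0 + 2 = 130. That flat offset is unrolled back
  // into indices [2, 2], pushed through layout->Forward, and re-flattened
  // against the remapped buffer's shape to produce the new indices.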

  Optional<PrimExpr> ResolveBufferLoad(const PrimExpr &expr) const {
    if (expr->IsInstance<BufferLoadNode>()) {
      return expr;
    }
    if (const auto *var_node = expr.as<VarNode>()) {
      Var var = GetRef<Var>(var_node);
      auto it = let_bindings_.find(var);
      if (it != let_bindings_.end()) {
        return it->second;
      }
    }
    return Optional<PrimExpr>();
  }
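
  // Example of how ResolveBufferLoad is used, assuming TIR of the form
  //   v = A_shared[i, j]   (a LetStmt binding recorded by the visitor below)
  //   T.ptx_ldmatrix(..., T.address_of(v), ...)
  // ResolveBufferLoad(v) returns the recorded BufferLoad A_shared[i, j], so
  // the permuted-layout rewrite can see through the let-bound variable.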

  Optional<Buffer> FindRemapBuffer(const Buffer &buffer) const {
    if (buffer_remap_.count(buffer)) {
      return buffer;
    }
    auto it = buffer_map_.find(buffer->data);
    if (it != buffer_map_.end() && buffer_remap_.count(it->second)) {
      return it->second;
    }
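    // Fallback: scan buffer_remap_ for an entry that aliases the same data
    // var or, as a last-resort heuristic, shares the buffer name; this
    // assumes distinct buffers never reuse a name.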
    for (const auto &kv : buffer_remap_) {
      if (kv.first->data.same_as(buffer->data)) {
        return kv.first;
      }
      if (kv.first->name == buffer->name) {
        return kv.first;
      }
    }
    return Optional<Buffer>();
  }

  Optional<Layout> FindLayout(const Buffer &buffer) const {
    if (layout_map_.count(buffer)) {
      return layout_map_[buffer];
    }
    auto it = buffer_map_.find(buffer->data);
    if (it != buffer_map_.end() && layout_map_.count(it->second)) {
      return layout_map_[it->second];
    }
    for (const auto &kv : layout_map_) {
      if (kv.first->data.same_as(buffer->data)) {
        return kv.second;
      }
      if (kv.first->name == buffer->name) {
        return kv.second;
      }
    }
    return Optional<Layout>();
  }

  PrimExpr VisitExpr_(const tir::CallNode *op) final {
    if ((!has_tma_) && (op->op.same_as(tl::tma_load()) ||
                        op->op.same_as(tl::tma_load_im2col()) ||
                        op->op.same_as(tl::tma_store()))) {
      has_tma_ = true;
    }
    Array<RelaxExpr> ptx_instructions = {builtin::ptx_ldmatrix(),
                                         builtin::mma_store()};

    if (std::find(ptx_instructions.begin(), ptx_instructions.end(), op->op) ==
        ptx_instructions.end()) {
      auto call = Downcast<Call>(IRMutatorWithAnalyzer::VisitExpr_(op));
      return call;
    } else {
      is_ptx_ = true;
    }
    // Rewrite from/to shared or shared.dyn to/from local
    auto call = Downcast<Call>(IRMutatorWithAnalyzer::VisitExpr_(op));
    if (call->op.same_as(builtin::ptx_ldmatrix())) {
      // form: T.ptx_ldmatrix(..., smem_ptr, smem_offset)
      // smem_ptr: T.tvm_access_ptr(ptype, data, offset, extent, rw_mask)
      // or T.address_of(buffer, offset)
      PrimExpr access_ptr = call->args[5];
      PrimExpr smem_offset = call->args[6];
      Call address_of_call = Downcast<Call>(access_ptr);
      if (!address_of_call->op.same_as(builtin::address_of())) {
        LOG(FATAL) << "Invalid access ptr for permuted layout: " << access_ptr;
      }
      Optional<PrimExpr> resolved = ResolveBufferLoad(address_of_call->args[0]);
      ICHECK(resolved.defined())
          << "Invalid address_of argument for permuted layout: "
          << address_of_call->args[0];
      PrimExpr load_expr = resolved.value();
      if (!load_expr.same_as(address_of_call->args[0])) {
        auto call_node = call.CopyOnWrite();
        call_node->args.Set(5, Call(address_of_call->dtype, address_of_call->op,
                                    {load_expr}, address_of_call->span));
        address_of_call = Downcast<Call>(call->args[5]);
        access_ptr = call->args[5];
      }
      // Downcast only to assert the argument is a BufferLoad; the load itself
      // is re-derived inside HandleAccessPtrAndOffset.
      (void)Downcast<BufferLoad>(address_of_call->args[0]);
      auto new_access_ptr =
          HandleAccessPtrAndOffset(access_ptr, smem_offset, call->dtype);
      if (new_access_ptr.rewritten) {
        auto new_call = call.CopyOnWrite();
        new_call->args.Set(5, new_access_ptr.expr);
        new_call->args.Set(6, IntImm(smem_offset->dtype, 0));
      }
    } else if (call->op.same_as(builtin::mma_store())) {
      // The result is now stored directly into the buffer rather than through
      // mma_store, so only the destination access pointer needs rewriting.
      auto access_ptr = call->args[2];
      auto new_access_ptr =
          HandleAccessPtrAndOffset(access_ptr, std::nullopt, call->dtype);
      if (new_access_ptr.rewritten) {
        auto new_call = call.CopyOnWrite();
        new_call->args.Set(2, new_access_ptr.expr);
      }
    } else {
      LOG(FATAL) << "Invalid call node: " << call;
    }
    is_ptx_ = false;
    return call;
  }

  PrimExpr VisitExpr_(const BufferLoadNode *op) final {
    auto load = Downcast<BufferLoad>(IRMutatorWithAnalyzer::VisitExpr_(op));
    if (is_ptx_) {
      return load;
    }
    auto buffer = load->buffer;
    if (buffer_remap_.count(buffer)) {
      auto new_indices = layout_map_[buffer]->Forward(load->indices);
      auto new_buffer = buffer_remap_[load->buffer];
      layout_remap_.Set(new_buffer, layout_map_[load->buffer]);
      return BufferLoad(new_buffer, new_indices);
    } else if (var_remap_.count(buffer->data)) {
      auto new_buffer = Buffer(
          var_remap_[buffer->data], buffer->dtype, buffer->shape,
          buffer->strides, buffer->elem_offset, buffer->name,
          buffer->data_alignment, buffer->offset_factor, buffer->buffer_type);
      return BufferLoad(new_buffer, load->indices);
    }
    return load;
  }

  Stmt VisitStmt_(const BufferStoreNode *op) final {
    auto store = Downcast<BufferStore>(IRMutatorWithAnalyzer::VisitStmt_(op));
    auto buffer = store->buffer;
    if (buffer_remap_.count(buffer)) {
      auto new_indices = layout_map_[buffer]->Forward(store->indices);
      auto new_buffer = buffer_remap_[store->buffer];
      layout_remap_.Set(new_buffer, layout_map_[store->buffer]);
      return BufferStore(new_buffer, store->value, new_indices);
    } else if (var_remap_.count(buffer->data)) {
      auto new_buffer = Buffer(
          var_remap_[buffer->data], buffer->dtype, buffer->shape,
          buffer->strides, buffer->elem_offset, buffer->name,
          buffer->data_alignment, buffer->offset_factor, buffer->buffer_type);
      return BufferStore(new_buffer, store->value, store->indices);
    }
    return store;
  }

  PrimExpr VisitExpr_(const VarNode *op) final {
    auto var = Downcast<Var>(IRMutatorWithAnalyzer::VisitExpr_(op));
    if (buffer_data_to_buffer_.count(var)) {
      auto buffer = buffer_data_to_buffer_[var];
      if (buffer_remap_.count(buffer))
        return buffer_remap_[buffer]->data;
    }
    return var;
  }

  Stmt VisitStmt_(const LetStmtNode *op) final {
    PrimExpr value = this->VisitExpr(op->value);
    bool recorded = false;
    if (value->IsInstance<BufferLoadNode>()) {
      let_bindings_[op->var] = value;
      recorded = true;
    }
    if (SideEffect(value) <= CallEffectKind::kPure) {
      analyzer_->Bind(op->var, value);
    }
    Stmt body = this->VisitStmt(op->body);
    if (recorded) {
      let_bindings_.erase(op->var);
    }
    if (value.same_as(op->value) && body.same_as(op->body)) {
      return GetRef<Stmt>(op);
    } else {
      auto n = this->CopyOnWrite(op);
      n->value = value;
      n->body = body;
      return Stmt(n);
    }
  }
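
  // Example of the binding flow above, with a hypothetical snippet:
  //   LetStmt(v, A_shared[i, j], body)
  // records let_bindings_[v] = A_shared[i, j] before visiting `body`, which
  // is what lets ResolveBufferLoad see through `v` in address_of(v) calls;
  // the entry is erased again once the body has been visited.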

  /**
   * @brief Handle an Evaluate node, lowering a detected tile operator to TIR.
   *
   * This visit implementation detects whether the Evaluate node represents a
   * tile operator invocation (via ParseOperator). If no tile operator is found
   * or the call targets a global function, the node is delegated to the base
   * visitor.
   *
   * When a tile operator is present, the method:
   * - Builds a workspace-allocation callback that creates a dynamic shared
   * buffer named "workspace" (storage scope "shared.dyn") and returns its write
   *   access pointer.
   * - Determines thread bounds for lowering from the analyzer's constant-int
   *   information for thread_var_; if unavailable, a default range [0,1) is
   * used.
   * - Invokes tile_op->Lower(...) with LowerArgs containing target, thread
   *   bounds, thread variable, the workspace callback, layout and buffer remap
   *   maps, and the list of GEMM-involved buffer vars; the analyzer is passed
   *   through for use during lowering.
   *
   * The lowered statement returned by the operator is then visited by the base
   * IRMutatorWithAnalyzer and that result is returned.
   *
   * @return Stmt The (possibly transformed) statement after lowering or base
   * visitor processing.
   */
  Stmt VisitStmt_(const EvaluateNode *op) final {
    const CallNode *call = op->value.as<CallNode>();
    // Do not analyze calls to global functions.
    if (call && call->op.as<GlobalVarNode>())
      return Downcast<Evaluate>(IRMutatorWithAnalyzer::VisitStmt_(op));

    auto tile_op = ParseOperator(GetRef<Stmt>(op), buffer_data_to_buffer_);
    if (!tile_op.defined())
      return IRMutatorWithAnalyzer::VisitStmt_(op);
    AddWorkspaceCallback callback = [this](int num_elem, DataType dtype) {
      auto workspace =
          decl_buffer({PrimExpr(num_elem)}, dtype, "workspace", "shared.dyn");
      workspaces_.push_back(workspace);
      return workspace.access_ptr(2); // write
    };
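    // E.g., a request for 1024 elements of some dtype declares a "workspace"
    // buffer of shape [1024] in scope "shared.dyn" and returns its write
    // access pointer; the buffer is later appended to the enclosing block's
    // allocations when the surrounding BlockNode is rewritten.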

    Range thread_bounds;

    if (analyzer_->const_int_bound.IsBound(thread_var_->var)) {
      auto const_int_bound = analyzer_->const_int_bound(thread_var_);
      auto min_value = const_int_bound->min_value;
      auto max_value = const_int_bound->max_value;
      auto extent = max_value + 1 - min_value;
      thread_bounds =
          Range::FromMinExtent(IntImm(thread_var_->var.dtype(), min_value),
                               IntImm(thread_var_->var.dtype(), extent));
    } else {
      thread_bounds = Range::FromMinExtent(0, 1);
    }
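    // For instance, if the analyzer has bound threadIdx.x to [0, 127], the
    // bounds become Range(0, 128); without such a binding (e.g. on a CPU
    // target) the fallback [0, 1) models a single thread.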

    auto lowered = tile_op->Lower(
        LowerArgs{target_, thread_bounds, thread_var_->var, callback,
                  layout_map_, buffer_remap_, buffer_var_gemm_},
        analyzer_);
    return IRMutatorWithAnalyzer::VisitStmt(lowered);
  }

  Stmt VisitStmt_(const AttrStmtNode *op) final {
    if (op->attr_key == tir::attr::thread_extent) {
      IterVar iv = Downcast<IterVar>(op->node);
      ICHECK_NE(iv->thread_tag.length(), 0U);
      if (iv->thread_tag == "threadIdx.x") {
        thread_var_ = iv;
        ICHECK(iv->dom->extent.as<IntImmNode>());
        thread_block_size_ = iv->dom->extent.as<IntImmNode>()->value;
      }
    }
    return arith::IRMutatorWithAnalyzer::VisitStmt_(op);
  }

  Target target_;
  Map<Var, Buffer> buffer_data_to_buffer_;
  Map<Buffer, Layout> layout_map_;
  Map<Buffer, Layout> layout_remap_;
  Map<Buffer, Buffer> buffer_remap_;
  // This is a workaround for cpu backend,
  // we need to define a thread_var for the serial loop.
  IterVar thread_var_ = IterVar(Range::FromMinExtent(0, 1), Var("v_thread"),
                                IterVarType::kDataPar);
  size_t thread_block_size_ = 0;
  Array<Buffer> workspaces_;
  // For ptx Node, we need to remap the buffer and indices
  // By access CallNode instead of BufferLoad Node.
  bool is_ptx_{false};
  std::unordered_map<Var, PrimExpr, ObjectPtrHash, ObjectPtrEqual>
      let_bindings_;
  // Mapping from data Var of a Buffer to Buffer, for lookup
  std::unordered_map<Var, Buffer, ObjectPtrHash, ObjectPtrEqual> buffer_map_;
  Map<Var, Var> var_remap_;
  bool has_tma_{false};
  Array<Var> buffer_var_gemm_;
};

namespace transform {

using namespace tir::transform;

tvm::transform::Pass LowerTileOp() {
  auto pass_func = [=](PrimFunc f, const IRModule &m, const PassContext &ctx) {
    return LowerTileOpPass::Substitute(std::move(f));
  };
  return CreatePrimFuncPass(pass_func, 0, "tl.LowerTileOp", {});
}
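
// A hedged usage sketch: the returned Pass can be applied directly to an
// IRModule whose PrimFuncs carry a target attribute, e.g.
//   mod = tl::transform::LowerTileOp()(mod);
// It is also exposed to Python as "tl.transform.LowerTileOp" via the FFI
// registration below.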

TVM_FFI_STATIC_INIT_BLOCK({
  namespace refl = tvm::ffi::reflection;
  refl::GlobalDef().def("tl.transform.LowerTileOp", LowerTileOp);
});
} // namespace transform

} // namespace tl
} // namespace tvm