/*!
 * \file multi_version_buffer_rewriter.cc
 * \brief Multi-version (double/multi-buffered) shared-memory rewriter for
 *        warp specialized pipelines on CUDA GPUs (sm90+).
 */

#include <tvm/ffi/reflection/registry.h>
#include <tvm/tir/analysis.h>
#include <tvm/tir/builtin.h>
#include <tvm/tir/op.h>
#include <tvm/tir/stmt_functor.h>
#include <tvm/tir/transform.h>

#include <cstdint>
#include <functional>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "../op/builtin.h"

namespace tvm {
namespace tl {

using namespace tir;

/*!
 * \brief Pipeline role of a statement: producing data (copies into shared
 *        memory), consuming it (compute), or a mix of both.
 */
enum class Role : uint8_t { kConsumer, kProducer, kBoth };

class WarpSpecializedRoleMarker_ : public StmtVisitor {
27
public:
28
  WarpSpecializedRoleMarker_(Map<Var, Buffer> buffer_data_to_buffer)
29
      : buffer_data_to_buffer_(std::move(buffer_data_to_buffer)) {}
30

31
  Role GetRole(const StmtNode *stmt) const {
32
33
34
35
36
    auto it = map_.find(stmt);
    ICHECK(it != map_.end());
    return it->second;
  }

37
  Role GetRole(const Stmt &stmt) const { return GetRole(stmt.get()); }
38

39
  void VisitStmt_(const EvaluateNode *op) final {
40
41
    Role role = Role::kConsumer;
    if (auto call = op->value.as<CallNode>()) {
42
      if (call->op.same_as(tma_load()) || call->op.same_as(tma_load_im2col())) {
43
44
45
46
47
48
49
        role = Role::kProducer;
        has_bulk_copy_ = true;
      }
    }
    SetRole(op, role);
  }

50
51
52
  void VisitStmt_(const BufferStoreNode *op) final {
    bool is_shared_store =
        op->buffer.scope() == "shared.dyn" || op->buffer.scope() == "shared";
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
    if (!is_shared_store) {
      SetRole(op, Role::kConsumer);
      return;
    }

    // Check reads from global
    Block block(/*iter_vars=*/{}, /*reads=*/{}, /*writes=*/{}, /*name_hint=*/"",
                /*body*/ GetRef<Stmt>(op));
    auto access = GetBlockReadWriteRegion(block, buffer_data_to_buffer_);
    auto reads = access[0];
    Role role = Role::kProducer;
    for (auto read : reads) {
      if (read->buffer.scope() != "global") {
        role = Role::kConsumer;
        break;
      }
    }
70
71
    if (role == Role::kProducer)
      has_simt_copy_ = true;
72
73
74
    SetRole(op, role);
  }

75
  void VisitStmt_(const SeqStmtNode *op) final {
76
77
78
79
80
81
82
83
84
85
86
    StmtVisitor::VisitStmt_(op);
    auto role = GetRole(op->seq[0]);
    for (auto stmt : op->seq) {
      if (role != GetRole(stmt)) {
        role = Role::kBoth;
        break;
      }
    }
    SetRole(op, role);
  }

87
  void VisitStmt_(const IfThenElseNode *op) final {
88
89
90
91
    StmtVisitor::VisitStmt_(op);
    auto role = GetRole(op->then_case);
    if (op->else_case.defined()) {
      auto role_else = GetRole(op->else_case.value());
92
93
      if (role != role_else)
        role = Role::kBoth;
94
95
96
97
    }
    SetRole(op, role);
  }

98
  void VisitStmt_(const BlockRealizeNode *op) final {
99
100
101
102
    StmtVisitor::VisitStmt_(op);
    SetRole(op, GetRole(op->block));
  }

103
  template <class NodeType> void HandleBodyStmt(const NodeType *op) {
104
105
106
107
    StmtVisitor::VisitStmt_(op);
    SetRole(op, GetRole(op->body));
  }

108
109
110
111
112
  void VisitStmt_(const ForNode *op) final { HandleBodyStmt(op); }
  void VisitStmt_(const LetStmtNode *op) final { HandleBodyStmt(op); }
  void VisitStmt_(const AttrStmtNode *op) final { HandleBodyStmt(op); }
  void VisitStmt_(const AssertStmtNode *op) final { HandleBodyStmt(op); }
  void VisitStmt_(const BlockNode *op) final { HandleBodyStmt(op); }
113
114
115
116
117

  bool HasProducer() { return has_simt_copy_ || has_bulk_copy_; }

  bool HasSimtCopy() { return has_simt_copy_; }

118
119
private:
  void SetRole(const StmtNode *stmt, Role role) { map_[stmt] = role; }
120
  Map<Var, Buffer> buffer_data_to_buffer_;
121
  std::unordered_map<const StmtNode *, Role> map_;
122
123
124
125
126
  bool has_simt_copy_ = false;
  bool has_bulk_copy_ = false;
};

class MultiVersionBufferRewriter : public StmtExprMutator {
127
128
public:
  static PrimFunc Substitute(PrimFunc &f) {
129
130
131
132
133
134
135
136
137
138
    auto rewriter = MultiVersionBufferRewriter();
    rewriter.buffer_lca_ = DetectBufferAccessLCA(f);
    for (auto [buffer, _] : rewriter.buffer_lca_) {
      Var buffer_var = buffer->data;
      rewriter.buffer_data_to_buffer_.Set(buffer_var, buffer);
    }
    f.CopyOnWrite()->body = rewriter(f->body);
    return f;
  }

139
private:
140
141
  MultiVersionBufferRewriter() = default;

142
143
  Array<Buffer> GetVersionedBuffers(const Array<Stmt> &seq_stmt,
                                    const Array<Buffer> &scoped_buffers) {
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
    Array<Stmt> pipeline_stmts;
    std::function<void(const Stmt &)> collect_stmts = [&](const Stmt &stmt) {
      if (const auto *seq = stmt.as<SeqStmtNode>()) {
        for (const Stmt &s : seq->seq) {
          collect_stmts(s);
        }
        return;
      }
      if (const auto *let = stmt.as<LetStmtNode>()) {
        collect_stmts(let->body);
        return;
      }
      if (const auto *attr = stmt.as<AttrStmtNode>()) {
        collect_stmts(attr->body);
        return;
      }
      if (const auto *block_realize = stmt.as<BlockRealizeNode>()) {
        collect_stmts(block_realize->block->body);
        return;
      }
      if (const auto *block = stmt.as<BlockNode>()) {
        collect_stmts(block->body);
        return;
      }
      pipeline_stmts.push_back(stmt);
    };
    for (const Stmt &stmt : seq_stmt) {
      collect_stmts(stmt);
    }

174
175
176
    std::vector<Role> roles;
    Array<Array<BufferRegion>> reads, writes;
    auto marker = WarpSpecializedRoleMarker_(buffer_data_to_buffer_);
177
    for (const Stmt &stmt : pipeline_stmts) {
178
      marker(stmt);
179
180
      Block block(/*iter_vars=*/{}, /*reads=*/{}, /*writes=*/{},
                  /*name_hint=*/"", /*body*/ stmt);
181
      auto access = GetBlockAccessRegion(block, buffer_data_to_buffer_);
182
183
      reads.push_back(access[0]);
      writes.push_back(access[1]);
184
185
186
      roles.push_back(marker.GetRole(stmt));
    }

187
    std::unordered_set<const BufferNode *> consumer_used, producer_used;
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
    std::unordered_map<const BufferNode *, size_t> first_write_index;
    std::unordered_map<const BufferNode *, size_t> last_read_index;
    auto is_copy_stage = [&](size_t idx) {
      bool has_shared_write = false;
      for (const BufferRegion &wr : writes[idx]) {
        auto scope = wr->buffer.scope();
        if (scope == "shared" || scope == "shared.dyn") {
          has_shared_write = true;
          break;
        }
      }
      if (!has_shared_write)
        return false;
      for (const BufferRegion &rd : reads[idx]) {
        if (rd->buffer.scope() == "global") {
          return true;
        }
      }
      return false;
    };
    for (size_t i = 0; i < pipeline_stmts.size(); i++) {
      bool copy_stage = is_copy_stage(i);
      bool is_producer = roles[i] == Role::kProducer ||
                         (roles[i] == Role::kBoth && copy_stage);
      bool is_consumer = roles[i] == Role::kConsumer ||
                         (roles[i] == Role::kBoth && !copy_stage);
      if (is_producer) {
        for (BufferRegion br : writes[i]) {
216
          producer_used.insert(br->buffer.get());
217
218
219
220
        }
      }
      if (is_consumer) {
        for (BufferRegion br : reads[i]) {
221
          consumer_used.insert(br->buffer.get());
222
223
224
225
226
227
228
229
230
231
        }
      }
      for (BufferRegion br : writes[i]) {
        const BufferNode *buf = br->buffer.get();
        if (!first_write_index.count(buf)) {
          first_write_index[buf] = i;
        }
      }
      for (BufferRegion br : reads[i]) {
        last_read_index[br->buffer.get()] = i;
232
233
234
235
      }
    }
    Array<Buffer> versioned_buffers;
    for (Buffer buffer : scoped_buffers) {
236
237
      if (consumer_used.count(buffer.get()) &&
          producer_used.count(buffer.get())) {
238
        versioned_buffers.push_back(buffer);
239
240
241
242
243
244
245
246
247
248
249
        continue;
      }
      // Fallback: if we saw a write before a later read, the buffer spans
      // multiple stages even if role classification missed one side.
      auto it_w = first_write_index.find(buffer.get());
      auto it_r = last_read_index.find(buffer.get());
      if (it_w != first_write_index.end() && it_r != last_read_index.end() &&
          it_w->second < it_r->second) {
        if (!is_copy_stage(it_w->second))
          continue;
        versioned_buffers.push_back(buffer);
250
251
252
253
254
      }
    }
    return versioned_buffers;
  }

255
  static Buffer RewriteAllocBuffer(const Buffer &buffer, int num_versions) {
256
257
    ObjectPtr<BufferNode> new_buffer = make_object<BufferNode>(*(buffer.get()));
    new_buffer->shape.insert(new_buffer->shape.begin(), PrimExpr(num_versions));
258
    if (!new_buffer->strides.empty()) {
259
260
261
262
263
264
265
      ICHECK(new_buffer->strides.size() + 1 == new_buffer->shape.size());
      PrimExpr stride_0 = new_buffer->strides[0] * new_buffer->shape[1];
      new_buffer->strides.insert(new_buffer->strides.begin(), stride_0);
    }
    return Buffer(new_buffer);
  }

266
267
268
  Stmt VisitStmt_(const BlockRealizeNode *op) final {
    BlockRealize block_realize =
        Downcast<BlockRealize>(StmtExprMutator::VisitStmt_(op));
269
270
271
272
273
274
275
276
277
278
279
    Block block = block_realize->block;
    Array<Buffer> alloc_buffers;
    for (auto buffer : block->alloc_buffers) {
      if (buffer_remap_.count(buffer)) {
        Buffer new_buffer = buffer_remap_[buffer];
        alloc_buffers.push_back(new_buffer);
      } else {
        alloc_buffers.push_back(buffer);
      }
    }
    block.CopyOnWrite()->alloc_buffers = std::move(alloc_buffers);
280
281
    // Record the updated alloc list to recover buffers whose LCA is the block.
    block_alloc_buffers_[op->block.get()] = block->alloc_buffers;
282
283
284
285
    block_realize.CopyOnWrite()->block = block;
    return block_realize;
  }

286
287
288
289
290
291
292
  Stmt VisitStmt_(const BlockNode *op) final {
    stmt_stack_.push_back(op);
    Stmt stmt = StmtExprMutator::VisitStmt_(op);
    stmt_stack_.pop_back();
    return stmt;
  }

293
  Stmt VisitStmt_(const ForNode *op) final {
294
    stmt_stack_.push_back(op);
295
    loop_stack_.emplace_back(op->loop_var, op->extent);
296
    auto num_stages_anno = op->annotations.Get("num_stages");
297
    if (!num_stages_anno) {
298
299
      auto for_node = StmtExprMutator::VisitStmt_(op);
      loop_stack_.pop_back();
300
      stmt_stack_.pop_back();
301
302
      return for_node;
    }
303

304
305
    ICHECK(num_stages_anno->as<IntImmNode>());
    int num_stages = static_cast<int>(num_stages_anno->as<IntImmNode>()->value);
306

307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
    Stmt pipeline_body_root{nullptr};
    if (const auto *realize = op->body.as<BlockRealizeNode>()) {
      const auto &block = realize->block;
      for (const auto &buffer : block->alloc_buffers) {
        ICHECK(buffer->IsInstance<BufferNode>());
        buffer_data_to_buffer_.Set(buffer->data, buffer);
      }
      pipeline_body_root = block->body;
    } else {
      pipeline_body_root = op->body;
    }

    const SeqStmtNode *pipeline_body_seq = nullptr;
    {
      // Traverse trivial wrappers (let/if) to find the actual SeqStmt body.
      Stmt current = pipeline_body_root;
      while (true) {
        if (const auto *seq_stmt = current.as<SeqStmtNode>()) {
          pipeline_body_seq = seq_stmt;
          break;
        }
        if (const auto *if_then_else = current.as<IfThenElseNode>()) {
          ICHECK(!if_then_else->else_case.defined())
              << "MultiVersionBuffer: Can't handle the body of the loop "
                 "because the IfThenElse node has an else branch";
          current = if_then_else->then_case;
          continue;
        }
        if (const auto *let_stmt = current.as<LetStmtNode>()) {
          current = let_stmt->body;
          continue;
        }
        LOG(FATAL)
            << "MultiVersionBuffer: Can't handle the body of the loop because "
            << "it is not a SeqStmt, IfThenElse without else, "
            << "or LetStmt wrapping them, but got " << current->GetTypeKey();
      }
    }
    ICHECK(pipeline_body_seq != nullptr);
346

347
348
    Array<Buffer> scoped_buffers;
    std::unordered_set<const BufferNode *> seen;
349
    for (auto [buffer, stmt] : buffer_lca_) {
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
      if (!stmt.defined())
        continue;
      const StmtNode *lca = stmt.value().get();
      bool in_scope = false;
      for (const StmtNode *ancestor : stmt_stack_) {
        if (ancestor == lca) {
          in_scope = true;
          break;
        }
      }
      if (!in_scope)
        continue;
      // Only double-buffer shared allocations; locals do not need versioning.
      auto scope = buffer.scope();
      if (!(scope == "shared" || scope == "shared.dyn"))
        continue;
      if (seen.insert(buffer.get()).second) {
367
        scoped_buffers.push_back(buffer);
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
      }
    }
    for (auto it = stmt_stack_.rbegin(); it != stmt_stack_.rend(); ++it) {
      if (!(*it)->IsInstance<BlockNode>())
        continue;
      const auto *block = static_cast<const BlockNode *>(*it);
      auto map_it = block_alloc_buffers_.find(block);
      if (map_it == block_alloc_buffers_.end())
        continue;
      for (const Buffer &buffer : map_it->second) {
        auto scope = buffer.scope();
        if (!(scope == "shared" || scope == "shared.dyn"))
          continue;
        if (seen.insert(buffer.get()).second) {
          scoped_buffers.push_back(buffer);
        }
      }
385
386
    }

387
388
    Array<Buffer> versioned_buffers =
        GetVersionedBuffers(pipeline_body_seq->seq, scoped_buffers);
389
390
391
392
393
394

    for (auto buffer : versioned_buffers) {
      Var buffer_var = buffer->data;
      Buffer new_buffer = RewriteAllocBuffer(buffer, num_stages);
      buffer_remap_.Set(buffer, new_buffer);
    }
395
396
397
398
399
400
    PrimExpr linear_index = loop_stack_[0].first;
    for (size_t i = 1; i < loop_stack_.size(); ++i) {
      linear_index =
          linear_index * loop_stack_[i].second + loop_stack_[i].first;
    }
    version_index_ = FloorMod(linear_index, num_stages);
401
    auto for_node = StmtExprMutator::VisitStmt_(op);
402
    loop_stack_.pop_back();
403
    stmt_stack_.pop_back();
404
405
406
407

    return for_node;
  }

408
  PrimExpr VisitExpr_(const BufferLoadNode *op) final {
409
410
411
412
413
    BufferLoad load = Downcast<BufferLoad>(StmtExprMutator::VisitExpr_(op));
    auto it = buffer_remap_.find(load->buffer);
    if (it == buffer_remap_.end()) {
      return std::move(load);
    }
414
415
    const Buffer &new_buffer = (*it).second;
    auto *n = load.CopyOnWrite();
416
417
418
419
420
    n->buffer = new_buffer;
    n->indices.insert(n->indices.begin(), version_index_);
    return std::move(load);
  }

421
  Stmt VisitStmt_(const BufferStoreNode *op) final {
422
423
424
425
426
    BufferStore store = Downcast<BufferStore>(StmtExprMutator::VisitStmt_(op));
    auto it = buffer_remap_.find(store->buffer);
    if (it == buffer_remap_.end()) {
      return std::move(store);
    }
427
428
    const Buffer &new_buffer = (*it).second;
    auto *n = store.CopyOnWrite();
429
430
431
432
433
    n->buffer = new_buffer;
    n->indices.insert(n->indices.begin(), version_index_);
    return std::move(store);
  }

434
  PrimExpr VisitExpr_(const CallNode *op) final {
435
436
437
438
439
440
441
    Call call = Downcast<Call>(StmtExprMutator::VisitExpr_(op));
    if (call->op.same_as(builtin::tvm_access_ptr())) {
      return RewriteBufferAccess(call, {1});
    }
    return call;
  }

442
  PrimExpr RewriteBufferAccess(const Call &call,
443
                               const std::vector<int> &arg_indices) {
444
445
    auto product = [](const Array<PrimExpr> &input) {
      return foldl(
446
447
448
          [](PrimExpr a, PrimExpr b, Span span) {
            return mul(std::move(a), std::move(b), std::move(span));
          },
449
          make_const(DataType::Int(32), 1), input);
450
451
452
453
    };
    Array<PrimExpr> new_args = call->args;
    for (int i : arg_indices) {
      auto buffer_var = Downcast<Var>(call->args[i]);
454
455
456
      if (!buffer_data_to_buffer_.count(buffer_var))
        continue;
      const Buffer &buffer = buffer_data_to_buffer_[buffer_var];
457
458
      auto it = buffer_remap_.find(buffer);
      if (it != buffer_remap_.end()) {
459
460
        const Buffer &new_buffer = (*it).second;
        const PrimExpr &old_index = call->args[i + 1];
461
462
463
464
465
466
467
468
469
470
471
472
473
474
        PrimExpr offset;
        if (new_buffer->strides.empty()) {
          offset = product(buffer->shape);
        } else {
          offset = new_buffer->strides[0];
        }
        PrimExpr new_index = old_index + version_index_ * offset;
        new_args.Set(i + 1, new_index);
      }
    }
    return Call(call->dtype, call->op, new_args, call->span);
  }

  PrimExpr version_index_;
475
  std::vector<std::pair<Var, PrimExpr>> loop_stack_;
476
477
478
  // Track ancestor statements to query whether an LCA is inside the current
  // loop.
  std::vector<const StmtNode *> stmt_stack_;
479
480
481
  Map<Var, Buffer> buffer_data_to_buffer_;
  Map<Buffer, Optional<Stmt>> buffer_lca_;
  Map<Buffer, Buffer> buffer_remap_;
482
483
484
  // Remember each block's alloc list so the loop can see buffers defined in
  // parents.
  std::unordered_map<const BlockNode *, Array<Buffer>> block_alloc_buffers_;
485
486
487
488
489
};

using namespace tir::transform;

/*!
 * \brief Create the TIR pass that multi-versions shared-memory buffers
 *        crossing software-pipeline stage boundaries.
 * \return The created pass, registered as "tl.MultiVersionBuffer".
 */
tvm::transform::Pass MultiVersionBuffer() {
  auto pass_func = [=](PrimFunc f, const IRModule &m, const PassContext &ctx) {
    return MultiVersionBufferRewriter::Substitute(f);
  };
  return CreatePrimFuncPass(pass_func, 0, "tl.MultiVersionBuffer", {});
}

496
497
498
499
TVM_FFI_STATIC_INIT_BLOCK({
  namespace refl = tvm::ffi::reflection;
  refl::GlobalDef().def("tl.transform.MultiVersionBuffer", MultiVersionBuffer);
});
500

501
502
} // namespace tl
} // namespace tvm