/*!
 * \file layout/layout.cc
 * \brief Layout and Fragment: index/thread mappings used for tile-level
 *        layout transformations.
 */

#include "layout.h"
#include <tvm/ffi/reflection/registry.h>
#include <tvm/runtime/logging.h>

#include <tvm/arith/pattern.h>
#include <tvm/tir/op.h>
#include <tvm/tir/stmt_functor.h>

#include "arith/pattern_match.h"
#include "utils.h"

namespace tvm {
namespace tl {

using namespace tir;

static Var getPlaceholder(const std::string &s) {
  static std::unordered_map<std::string, Var> map;
  if (map.find(s) == map.end()) {
    map[s] = Var(s);
  }
  return map[s];
}

Var ReplicationPlaceholder() { return getPlaceholder("_rep"); }
Var InputPlaceholder(size_t idx) {
  return getPlaceholder(std::string{'_', char('i' + idx)});
}
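// Placeholder variables are memoized by name: InputPlaceholder(0) is "_i",
// InputPlaceholder(1) is "_j", and so on, while ReplicationPlaceholder() is
// "_rep". Repeated calls therefore return the same Var object, which is what
// allows substitution maps built by different routines to line up.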

Map<Var, Range> LayoutNode::getVarMap() const {
  Map<Var, Range> map;
  for (size_t i = 0; i < InputDim(); i++) {
    map.Set(InputPlaceholder(i), {0, input_size_[i]});
  }
  return map;
}

Map<Var, Range> FragmentNode::getVarMap() const {
  auto map = LayoutNode::getVarMap();
  map.Set(ReplicationPlaceholder(), {0, ReplicateExtent()});
  return map;
}

LayoutNode::LayoutNode(Array<PrimExpr> input_size,
                       Array<PrimExpr> forward_index) {
  input_size_ = input_size;
  arith::Analyzer analyzer;
  UpdateAnalyzer(&analyzer);
  forward_index_ = forward_index.Map(
      [&](const PrimExpr &e) { return analyzer.Simplify(e); });
}

Layout::Layout(Array<IterVar> forward_var, Array<PrimExpr> forward_index) {
  Map<Var, PrimExpr> vmap;
  Array<PrimExpr> input_size;
  for (size_t i = 0; i < forward_var.size(); i++) {
    vmap.Set(forward_var[i]->var, InputPlaceholder(i));
    CHECK(is_zero(forward_var[i]->dom->min));
    input_size.push_back(forward_var[i]->dom->extent);
  }
  forward_index =
      forward_index.Map([&](const PrimExpr &e) { return Substitute(e, vmap); });
  auto n = tvm::ffi::make_object<LayoutNode>(input_size, forward_index);
  data_ = std::move(n);
}

Layout::Layout(Array<PrimExpr> input_size, Array<PrimExpr> forward_index) {
  auto n = tvm::ffi::make_object<LayoutNode>(input_size, forward_index);
  data_ = std::move(n);
}

void LayoutNode::RegisterReflection() {
  namespace refl = tvm::ffi::reflection;
  refl::ObjectDef<LayoutNode>()
      .def_ro("input_size", &LayoutNode::input_size_)
      .def_ro("forward_index", &LayoutNode::forward_index_);
}

void LayoutNode::UpdateAnalyzer(arith::Analyzer *analyzer) const {
  for (const auto &[var, dom] : getVarMap()) {
    analyzer->Bind(var, dom);
  }
}

Array<PrimExpr> LayoutNode::GetForwardVars() const {
  Array<PrimExpr> vars;
  for (size_t i = 0; i < InputDim(); i++) {
    vars.push_back(InputPlaceholder(i));
  }
  return vars;
}

Array<PrimExpr> LayoutNode::OutputShape() const {
  Array<PrimExpr> ret(OutputDim(), 1);
  arith::Analyzer analyzer;
  UpdateAnalyzer(&analyzer);
  for (size_t i = 0; i < ret.size(); i++) {
    auto ist = analyzer.int_set(forward_index_[i] + 1);
    if (arith::is_neg_inf(ist.min()) && arith::is_pos_inf(ist.max())) {
      // Analyzer couldn't form an IntervalSet (e.g. bitwise ops).
      // Fall back to ConstIntBound to derive a safe extent.
      auto cib = analyzer.const_int_bound(forward_index_[i]);
      if (cib->min_value != arith::ConstIntBound::kNegInf &&
          cib->max_value != arith::ConstIntBound::kPosInf &&
          cib->min_value >= 0) {
        // extent = max - min + 1, constructed as a 64-bit Integer
        ret.Set(i, Integer(cib->max_value - cib->min_value + 1));
      } else {
        // Last-resort conservative fallback to avoid OOB/crash
        // Prefer to keep dimension from known input_size_ if available.
        if (i < input_size_.size()) {
          ret.Set(i, input_size_[i]);
        } else {
          ret.Set(i, Integer(1));
        }
      }
    } else {
      ret.Set(i, ist.max());
    }
  }
  return ret;
}
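// Worked example for OutputShape() (a sketch with assumed shapes): for a
// layout with input [8, 4] and forward_index = {_i * 4 + _j}, the interval of
// _i * 4 + _j + 1 over _i in [0, 8) and _j in [0, 4) has maximum 32, so the
// derived output shape is {32}.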

Array<PrimExpr> LayoutNode::Forward(const Array<PrimExpr> &vars) const {
  if (vars.empty())
    return forward_index_;
  ICHECK_GE(vars.size(), InputDim());

  // Take the last InputDim() elements for transformation
  Array<PrimExpr> transform_vars;
  for (size_t i = vars.size() - InputDim(); i < vars.size(); i++) {
    transform_vars.push_back(vars[i]);
  }

  Map<Var, PrimExpr> vmap;
  for (size_t i = 0; i < InputDim(); i++) {
    vmap.Set(InputPlaceholder(i), transform_vars[i]);
  }

  Array<PrimExpr> transformed = forward_index_.Map(
      [&](const PrimExpr &e) { return Substitute(e, vmap); });
  // Concatenate with the remaining elements from vars
  Array<PrimExpr> result;
  for (size_t i = 0; i < vars.size() - InputDim(); i++) {
    result.push_back(vars[i]);
  }
  for (const auto &expr : transformed) {
    result.push_back(expr);
  }

  return result;
}
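// Worked example for Forward() (a sketch, with hypothetical expressions b, x,
// y): given input [8, 4] and forward_index = {_i * 4 + _j}, Forward({b, x, y})
// substitutes the trailing InputDim() entries, keeps the leading `b`
// untouched, and returns {b, x * 4 + y}; Forward({}) returns forward_index_
// unchanged.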

Fragment FragmentNode::Repeat(const Array<PrimExpr> &repeats,
                              bool repeat_on_thread,
                              bool lower_dim_first) const {
  ICHECK_EQ(repeats.size(), InputDim());
  Array<PrimExpr> new_input_size;
  Map<Var, PrimExpr> vmap;
  for (size_t i = 0; i < InputDim(); i++) {
    new_input_size.push_back(input_size_[i] * repeats[i]);
    vmap.Set(InputPlaceholder(i),
             FloorMod(InputPlaceholder(i), InputShape()[i]));
  }

  PrimExpr repeats_index = 0, repeat_stride = 1;
  if (lower_dim_first) {
    for (int i = InputDim() - 1; i >= 0; i--) {
      repeats_index +=
          repeat_stride * FloorDiv(InputPlaceholder(i), InputShape()[i]);
      repeat_stride *= repeats[i];
    }
  } else {
    for (size_t i = 0; i < InputDim(); i++) {
      repeats_index +=
          repeat_stride * FloorDiv(InputPlaceholder(i), InputShape()[i]);
      repeat_stride *= repeats[i];
    }
  }

  if (repeat_on_thread) {
    PrimExpr thread_size = ThreadExtent();
    auto new_forward_index = forward_index_.Map(
        [&](const PrimExpr &e) { return Substitute(e, vmap); });
    auto new_forward_thread =
        Substitute(forward_thread_, vmap) + thread_size * repeats_index;
    return Fragment(new_input_size, new_forward_index, new_forward_thread,
                    replicate_size_, std::nullopt);
  } else {
    ICHECK(OutputDim() == 1);
    PrimExpr frag_len = OutputShape()[0];
    Array<PrimExpr> new_forward_index = {Substitute(forward_index_[0], vmap) +
                                         frag_len * repeats_index};
    PrimExpr new_forward_thread = Substitute(forward_thread_, vmap);
    return Fragment(new_input_size, new_forward_index, new_forward_thread,
                    replicate_size_, std::nullopt);
  }
}
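// Worked example for Repeat() (a sketch with assumed extents m, n): repeating
// a single-output fragment over [m, n] with repeats = {2, 1} and
// repeat_on_thread = false gives input shape {2 * m, n}; coordinate (i, j)
// maps to the original index at (i % m, j) plus OutputShape()[0] * (i / m),
// while the thread mapping is left unchanged. With repeat_on_thread = true,
// the thread expression is offset by ThreadExtent() * (i / m) instead.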

Fragment FragmentNode::Replicate(int repeats) const {
  ICHECK(repeats >= 1);
  Map<Var, PrimExpr> vmap;
  vmap.Set(ReplicationPlaceholder(),
           FloorMod(ReplicationPlaceholder(), ReplicateExtent()));
  PrimExpr new_forward_thread =
      Substitute(forward_thread_, vmap) +
      ThreadExtent() * FloorDiv(ReplicationPlaceholder(), ReplicateExtent());
  return Fragment(input_size_, forward_index_, new_forward_thread,
                  ReplicateExtent() * repeats, std::nullopt);
}
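// Worked example for Replicate() (a sketch with assumed extents): for a
// fragment with ReplicateExtent() == 2 and ThreadExtent() == 32, Replicate(2)
// yields replicate extent 4; replication index rep maps to the original
// thread of (rep % 2) plus an offset of 32 * (rep / 2), so each new replica
// block occupies a fresh range of threads.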

Fragment FragmentNode::DeReplicate() const {
  ICHECK(OutputDim() == 1);
  arith::Analyzer analyzer;
  UpdateAnalyzer(&analyzer);
  int factor = 1;
  auto rep_size = as_const_int(ReplicateExtent());
  auto idx_size = as_const_int(OutputShape()[0]);
  if (rep_size && idx_size) {
    factor = arith::ZeroAwareGCD(*rep_size, *idx_size);
  }
  if (factor == 1)
    return tvm::ffi::GetRef<Fragment>(this);

  Map<Var, PrimExpr> vmap;
  vmap.Set(ReplicationPlaceholder(), ReplicationPlaceholder() * factor +
                                         FloorMod(forward_index_[0], factor));
  PrimExpr new_forward_thread = Substitute(forward_thread_, vmap);
  Array<PrimExpr> new_forward_index = {FloorDiv(forward_index_[0], factor)};
  return Fragment(input_size_, new_forward_index, new_forward_thread,
                  int(*rep_size) / factor, std::nullopt);
}
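// Worked example for DeReplicate() (a sketch with assumed extents): with
// ReplicateExtent() == 4 and a fragment index of extent 8, the factor is
// gcd(4, 8) == 4, so the result has replicate extent 1 and index extent 2;
// inside the thread mapping the old replication index is reconstructed as
// rep * 4 + (forward_index % 4).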

Fragment FragmentNode::BindThreadRange(Range thread_range) const {
  auto n = tvm::ffi::make_object<FragmentNode>(*this);
  n->thread_range_ = thread_range;
  return Fragment(n);
}

std::pair<Layout, arith::IterMapLevel> LayoutNode::InverseWithLevel() const {
  arith::Analyzer analyzer;
  auto collect_symbolic = [&](const Array<PrimExpr> &shape) {
    Array<PrimExpr> symbolic_dims;
    for (const auto &dim : shape) {
      if (!as_const_int(dim)) {
        symbolic_dims.push_back(dim);
      }
    }
    return symbolic_dims;
  };
  Array<PrimExpr> symbolic_dims = collect_symbolic(input_size_);
  Array<PrimExpr> output_shape = OutputShape();
  symbolic_dims.insert(symbolic_dims.end(), output_shape.begin(),
                       output_shape.end());
  symbolic_dims = collect_symbolic(symbolic_dims);
  bool is_static_shape = symbolic_dims.empty();
  auto level = is_static_shape ? arith::IterMapLevel::Bijective
                               : arith::IterMapLevel::NoCheck;
  if (!is_static_shape) {
    // Runtime guards keep dynamic tails safe, so we allow NoCheck here and
    // warn.
    DLOG(WARNING) << "Layout::Inverse on symbolic layout, falling back to "
                     "NoCheck; symbolic dims: "
                  << symbolic_dims;
  }
  arith::IterMapResult res =
      arith::DetectIterMap(forward_index_, getVarMap(), 1, level, &analyzer);
  if (!res->errors.empty()) {
    std::ostringstream msg;
    msg << "Layout " << DebugOutput() << " has errors: " << res->errors;
    throw NormalizeIterException(msg.str());
  }

  auto outputs_shape = OutputShape();
  Array<PrimExpr> outputs;
  for (size_t i = 0; i < OutputDim(); i++) {
    outputs.push_back(InputPlaceholder(i));
  }

  auto inv = arith::InverseAffineIterMap(res->indices, outputs);

  Array<PrimExpr> backward_index;
  for (size_t i = 0; i < InputDim(); i++) {
    if (inv.find(InputPlaceholder(i)) != inv.end()) {
      backward_index.push_back(inv[InputPlaceholder(i)]);
    } else {
      backward_index.push_back(0);
    }
  }

  return {Layout(outputs_shape, backward_index), level};
}
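// Worked example for InverseWithLevel() (a sketch with an assumed static
// shape): inverting the row-major layout [8, 4] -> {_i * 4 + _j} yields
// Layout([32] -> {_i floordiv 4, _i floormod 4}) at Bijective level; symbolic
// shapes fall back to NoCheck as warned above.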

Layout LayoutNode::Reshape(const Array<PrimExpr> &shape,
                           arith::Analyzer *analyzer) const {
  // Fast path: if shape is the same, return the original layout
  if (StructuralEqual()(InputShape(), shape)) {
    return ffi::GetRef<Layout>(this);
  }

  // Step 1. Prove the product of InputShape is equal to the product of shape
  PrimExpr input_shape_product = Integer(1);
  for (const auto &dim : InputShape()) {
    input_shape_product *= dim;
  }
  PrimExpr shape_product = Integer(1);
  for (const auto &dim : shape) {
    shape_product *= dim;
  }

  // Use provided analyzer if present, otherwise a local fallback to avoid
  // potential null dereference paths flagged by static analysis.
  arith::Analyzer fallback_analyzer;
  arith::Analyzer *az = analyzer ? analyzer : &fallback_analyzer;
  ICHECK(az->CanProveEqual(input_shape_product, shape_product))
      << "InputShape() = " << InputShape() << " shape = " << shape;

  // Step 2. Create new forward indices by reshaping
  // For each dimension in the new shape, we create a placeholder variable
  Array<Var> new_vars;
  new_vars.reserve(shape.size());
  for (size_t i = 0; i < shape.size(); ++i) {
    auto var = Var(std::string("n_") + std::to_string(i), shape[i].dtype());
    az->Bind(var, Range(0, shape[i]));
    new_vars.push_back(var);
  }
  // Step 3. Compute the flat index from new shape indices
  // flat_index = k0 * (s1 * s2 * ...) + k1 * (s2 * s3 * ...) + ... + kn
  PrimExpr flat_index = Integer(0);
  for (size_t i = 0; i < shape.size(); ++i) {
    PrimExpr stride = Integer(1);
    for (size_t j = i + 1; j < shape.size(); ++j) {
      stride = stride * shape[j];
    }
    flat_index = flat_index + new_vars[i] * stride;
  }
  // Step 4. Convert flat index back to original shape indices
  // For original shape [s0, s1, ..., sm]:
  // i0 = flat_index // (s1 * s2 * ... * sm)
  // i1 = (flat_index % (s1 * s2 * ... * sm)) // (s2 * s3 * ... * sm)
  // ...
  Array<PrimExpr> original_indices;
  PrimExpr remaining = flat_index;
  for (size_t i = 0; i < InputShape().size(); ++i) {
    PrimExpr stride = Integer(1);
    for (size_t j = i + 1; j < InputShape().size(); ++j) {
      stride = stride * InputShape()[j];
    }
    original_indices.push_back(floordiv(remaining, stride));
    remaining = floormod(remaining, stride);
  }
  // Step 5. Substitute original indices into forward_index_
  Array<PrimExpr> new_forward_index;
  for (const auto &fwd_expr : forward_index_) {
    PrimExpr substituted = fwd_expr;
    // Replace each InputPlaceholder(i) with original_indices[i]
    for (size_t i = 0; i < InputShape().size(); ++i) {
      substituted =
          Substitute(substituted, {{InputPlaceholder(i), original_indices[i]}});
    }
    new_forward_index.push_back(az->Simplify(substituted));
  }
  for (size_t i = 0; i < new_vars.size(); ++i) {
    new_forward_index =
        Substitute(new_forward_index, {{new_vars[i], InputPlaceholder(i)}});
  }
  return Layout(shape, new_forward_index);
}
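// Worked example for Reshape() (a sketch with assumed shapes): reshaping the
// row-major layout [4, 8] -> {_i * 8 + _j} to shape {32} builds the flat index
// k, recovers the original coordinates (k / 8, k % 8), and substitutes them
// into the forward index, which should simplify to {_i} over the new flat
// input coordinate.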

Layout FragmentNode::Reshape(const Array<PrimExpr> &shape,
                             arith::Analyzer *analyzer) const {
  // Fast path: identical input shape, return self
  if (StructuralEqual()(InputShape(), shape)) {
    return ffi::GetRef<Fragment>(this);
  }

  // 1) Prove total number of elements remains the same
  PrimExpr input_prod = Integer(1);
  for (const auto &d : InputShape())
    input_prod *= d;
  PrimExpr shape_prod = Integer(1);
  for (const auto &d : shape)
    shape_prod *= d;

  // Use provided analyzer if present, otherwise a local fallback.
  arith::Analyzer fallback_analyzer;
  arith::Analyzer *az = analyzer ? analyzer : &fallback_analyzer;
  ICHECK(az->CanProveEqual(input_prod, shape_prod))
      << "InputShape() = " << InputShape() << " shape = " << shape
      << " input fragment layout is = " << DebugOutput();

  // 2) Build flat index from new-shape indices
  Array<Var> new_vars;
  new_vars.reserve(shape.size());
  for (size_t i = 0; i < shape.size(); ++i) {
    // Cannot use InputPlaceholder(i) here, because it would cause name capture
    // (variable capture) with InputPlaceholder(i) in upper scopes. Therefore,
    // we must create a fresh variable here to avoid confusion when
    // substituting.
    auto var = Var(std::string("n_") + std::to_string(i), shape[i].dtype());
    az->Bind(var, Range(0, shape[i]));
    new_vars.push_back(var);
  }

  PrimExpr flat = Integer(0);
  for (size_t i = 0; i < shape.size(); ++i) {
    PrimExpr stride = Integer(1);
    for (size_t j = i + 1; j < shape.size(); ++j)
      stride = stride * shape[j];
    flat = flat + new_vars[i] * stride;
  }
  // 3) Recover original indices from flat index
  Array<PrimExpr> orig_indices;
  PrimExpr remain = flat;
  for (size_t i = 0; i < InputShape().size(); ++i) {
    PrimExpr stride = Integer(1);
    for (size_t j = i + 1; j < InputShape().size(); ++j)
      stride = stride * InputShape()[j];
    orig_indices.push_back(floordiv(remain, stride));
    remain = floormod(remain, stride);
  }
  // 4) Substitute old placeholders with expressions of new indices
  Array<PrimExpr> new_forward_index;
  for (const auto &e : forward_index_) {
    PrimExpr cur = e;
    for (size_t i = 0; i < InputShape().size(); ++i) {
      cur = Substitute(cur, {{InputPlaceholder(i), orig_indices[i]}});
    }
    cur = az->Simplify(cur);
    new_forward_index.push_back(cur);
  }
  PrimExpr new_forward_thread = forward_thread_;
  for (size_t i = 0; i < InputShape().size(); ++i) {
    new_forward_thread = Substitute(new_forward_thread,
                                    {{InputPlaceholder(i), orig_indices[i]}});
  }
  new_forward_thread = az->Simplify(new_forward_thread);
  for (size_t i = 0; i < new_vars.size(); ++i) {
    auto var = new_vars[i];
    new_forward_index =
        Substitute(new_forward_index, {{var, InputPlaceholder(i)}});
    new_forward_thread =
        Substitute(new_forward_thread, {{var, InputPlaceholder(i)}});
  }
  Fragment reshaped(shape, new_forward_index, new_forward_thread,
                    ReplicateExtent(), std::nullopt);
  if (thread_range_.defined()) {
    reshaped = reshaped->BindThreadRange(thread_range_);
  }
  return reshaped;
}

Layout LayoutNode::Inverse() const {
  auto inverse_result = InverseWithLevel();
  return std::move(inverse_result.first);
}

PrimExpr infer_fragment_index(const Map<Var, Range> &input_iters,
                              const PrimExpr &forward_thread,
                              arith::Analyzer *analyzer) {
  Array<arith::IterSplitExpr> splits = DivideUnusedIterators(
      {forward_thread}, ToIterVars(input_iters), analyzer);

  Array<arith::IterSplitExpr> split_without_rep;
  for (const auto &split : splits) {
    CHECK(split->source->source.as<Var>());
    if (split->source->source.as<Var>().value().same_as(
            ReplicationPlaceholder()))
      continue;
    split_without_rep.push_back(split);
  }
  return MakeFlattenedExpression(split_without_rep);
}
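// Intent of infer_fragment_index (a sketch, relying on DivideUnusedIterators
// and MakeFlattenedExpression from utils.h): loop iterators that never appear
// in the thread mapping describe data private to one thread, so their unused
// pieces are flattened into a single per-thread fragment index; splits coming
// from the replication placeholder are skipped because replication adds no
// per-thread storage.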

FragmentNode::FragmentNode(Array<PrimExpr> input_size,
                           Array<PrimExpr> forward_index,
                           PrimExpr forward_thread, PrimExpr replicate_size) {
  input_size_ = input_size;
  replicate_size_ = replicate_size;
  arith::Analyzer analyzer;
  UpdateAnalyzer(&analyzer);
  forward_thread_ = analyzer.Simplify(forward_thread);
  if (forward_index.empty()) {
    forward_index = {
        infer_fragment_index(getVarMap(), forward_thread_, &analyzer)};
  }
  forward_index_ = forward_index.Map(
      [&](const PrimExpr &e) { return analyzer.Simplify(e); });
}

Fragment::Fragment(Array<IterVar> forward_var, Array<PrimExpr> forward_index,
                   PrimExpr forward_thread, IterVar thread_replicate) {
  Map<Var, PrimExpr> vmap;
  Array<PrimExpr> input_size;
  PrimExpr replicate_size = 1;
  for (size_t i = 0; i < forward_var.size(); i++) {
    vmap.Set(forward_var[i]->var, InputPlaceholder(i));
    CHECK(is_zero(forward_var[i]->dom->min));
    input_size.push_back(forward_var[i]->dom->extent);
  }
  if (thread_replicate.defined()) {
    ICHECK(is_zero(thread_replicate->dom->min));
    replicate_size = thread_replicate->dom->extent;
    vmap.Set(thread_replicate->var, ReplicationPlaceholder());
  }
  forward_index =
      forward_index.Map([&](const PrimExpr &e) { return Substitute(e, vmap); });
  forward_thread = Substitute(forward_thread, vmap);

  auto n = tvm::ffi::make_object<FragmentNode>(input_size, forward_index,
                                               forward_thread, replicate_size);
  data_ = std::move(n);
}

Fragment::Fragment(Array<PrimExpr> input_size, Array<PrimExpr> forward_index,
                   PrimExpr forward_thread, PrimExpr replicate_size,
                   Optional<Var> replicate_var) {
  if (replicate_var.defined()) {
    forward_thread = Substitute(
        forward_thread, {{replicate_var.value(), ReplicationPlaceholder()}});
  }
  auto n = tvm::ffi::make_object<FragmentNode>(input_size, forward_index,
                                               forward_thread, replicate_size);
  data_ = std::move(n);
}

// A fragment is completely replicated when the thread mapping is exactly the
// replication variable, i.e. forward_thread is `lambda i, rep: rep`.
bool FragmentNode::IsCompletedReplicated() const {
  arith::Analyzer analyzer;
  return ExprDeepEqual()(analyzer.Simplify(forward_thread_),
                         ReplicationPlaceholder());
}

PrimExpr FragmentNode::ThreadExtent() const {
  // The thread extent is derived as max(forward_thread_) + 1 under the bound
  // input and replication ranges.
  arith::Analyzer analyzer;
  UpdateAnalyzer(&analyzer);
  auto ist = analyzer.int_set(forward_thread_ + 1);
  return ist.max();
}

Array<PrimExpr> FragmentNode::GetForwardVars() const {
  Array<PrimExpr> vars;
  auto rep_extent = as_const_int(ReplicateExtent());
  ICHECK(rep_extent) << "ReplicateExtent() must be a constant integer";
  if (*rep_extent > 1) {
    vars.push_back(ReplicationPlaceholder());
  }
  for (size_t i = 0; i < InputDim(); i++) {
    vars.push_back(InputPlaceholder(i));
  }
  return vars;
}

PrimExpr FragmentNode::ForwardThread(const Array<PrimExpr> &vars,
                                     const Optional<PrimExpr> &rep_var) const {
  Map<Var, PrimExpr> vmap;
  ICHECK_EQ(vars.size(), InputDim());
  for (size_t i = 0; i < InputDim(); i++) {
    vmap.Set(InputPlaceholder(i), vars[i]);
  }
  if (rep_var.defined())
    vmap.Set(ReplicationPlaceholder(), rep_var.value());

  return Substitute(forward_thread_, vmap);
}

Layout FragmentNode::Inverse() const {
  auto result = InverseWithLevel();
  return std::move(result.first);
}

std::pair<Layout, arith::IterMapLevel> FragmentNode::InverseWithLevel() const {
  auto input_size_copy = input_size_;
  input_size_copy.push_back(ReplicateExtent());
  auto forward_index_copy = forward_index_;
  forward_index_copy.push_back(
      Substitute(forward_thread_,
                 {{ReplicationPlaceholder(), InputPlaceholder(InputDim())}}));
  auto fwd = Layout(input_size_copy, forward_index_copy);
  return fwd->InverseWithLevel();
}
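// Note on the construction above (a sketch): the replication index is appended
// as one extra input dimension and the thread expression as one extra output
// index, so the resulting inverse maps (fragment index, thread) back to the
// logical coordinates and the replication index of the fragment.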

Fragment FragmentNode::CondenseReplicateVar() const {
  arith::Analyzer analyzer;
  auto input_iters = getVarMap();
  input_iters.Set(ReplicationPlaceholder(), {0, ReplicateExtent()});
  PrimExpr new_forward_thread;
  IterVar new_thread_replicate;
  std::tie(new_forward_thread, new_thread_replicate) =
      CompressIterator(forward_thread_, ToIterVars(input_iters),
                       ReplicationPlaceholder(), &analyzer);
  return Fragment(input_size_, forward_index_, new_forward_thread,
                  new_thread_replicate->dom->extent, new_thread_replicate->var);
}
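// Intent of CondenseReplicateVar (a sketch, relying on CompressIterator from
// utils.h): when only part of the replication range actually influences
// forward_thread_, the replication extent is compressed to that effective
// part and the thread expression is rewritten over the compressed iterator.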

std::string LayoutNode::DebugOutput() const {
  std::stringstream ss;
  ss << "Layout(" << InputShape() << " -> " << OutputShape()
     << ", transform: " << GetForwardVars() << " -> " << GetForwardIndex()
     << ")";
  return ss.str();
}

std::string FragmentNode::DebugOutput() const {
  std::stringstream ss;
  ss << "Fragment(" << InputShape() << " -> " << OutputShape()
     << ", replicate: " << ReplicateExtent() << ", thread: " << ThreadExtent()
     << ", forward_thread: " << forward_thread_
     << ", forward_index: " << GetForwardIndex();
  if (thread_range_.defined()) {
    ss << ", thread_range: " << thread_range_;
  }
  ss << ")";
  return ss.str();
}

bool LayoutNode::IsEqual(const LayoutNode *other, bool skip_index) const {
  bool ret = StructuralEqual()(this->InputShape(), other->InputShape());
  ret &= StructuralEqual()(this->OutputShape(), other->OutputShape());
  if (!skip_index) {
    ret &= StructuralEqual()(this->forward_index_, other->forward_index_);
  }
  return ret;
}

bool FragmentNode::IsEqual(const FragmentNode *other, bool skip_index) const {
  // Fragment Layout Comparison can skip the index comparison
  // when the output shape is the same, as we can do
  // a[i, j] = b[j, i] in register level.

  bool ret = StructuralEqual()(this->InputShape(), other->InputShape());
  if (!ret) {
    // Input shapes differ: treat the fragments as compatible, since this may
    // be a broadcast case.
    return true;
  }
  if (this->thread_range_.defined() && other->thread_range_.defined()) {
    ret &= StructuralEqual()(this->thread_range_, other->thread_range_);
  }
  ret &= StructuralEqual()(this->OutputShape(), other->OutputShape());
  ret &= StructuralEqual()(this->ReplicateExtent(), other->ReplicateExtent());
  ret &= StructuralEqual()(this->ThreadExtent(), other->ThreadExtent());
  if (!skip_index) {
    ret &= StructuralEqual()(this->forward_index_, other->forward_index_);
  }
  return ret;
}

void FragmentNode::RegisterReflection() {
  namespace refl = tvm::ffi::reflection;
  refl::ObjectDef<FragmentNode>()
      .def_ro("forward_thread", &FragmentNode::forward_thread_)
      .def_ro("replicate_size", &FragmentNode::replicate_size_);
}

TVM_FFI_STATIC_INIT_BLOCK() {
  namespace refl = tvm::ffi::reflection;
  refl::GlobalDef()
      .def_packed("tl.Layout",
                  [](PackedArgs args, Any *rv) {
                    *rv = Layout(args[0].cast<Array<IterVar>>(),
                                 args[1].cast<Array<PrimExpr>>());
                  })
      .def("tl.Layout_input_shape",
           [](Layout layout) { return layout->InputShape(); })
      .def("tl.Layout_output_shape",
           [](Layout layout) { return layout->OutputShape(); })
      .def("tl.Layout_inverse", [](Layout layout) { return layout->Inverse(); })
      .def("tl.Layout_index",
           [](Layout layout) { return layout->GetForwardIndex(); })
      .def("tl.Layout_forward_vars",
           [](Layout layout) { return layout->GetForwardVars(); })
      .def("tl.Layout_is_equal",
           [](Layout layout, Layout other) {
             const LayoutNode *other_node = other.as<LayoutNode>();
             return layout->IsEqual(other_node);
           })
      .def_packed("tl.Fragment",
                  [](PackedArgs args, Any *rv) {
                    *rv = Fragment(
                        /*forward_var=*/args[0].cast<Array<IterVar>>(),
                        /*forward_index=*/args[1].cast<Array<PrimExpr>>(),
                        /*forward_thread=*/args[2].cast<PrimExpr>(),
                        /*thread_replicate=*/args[3].cast<IterVar>());
                  })
      .def("tl.Fragment_is_equal",
           [](Fragment fragment, Fragment other) {
             const FragmentNode *other_node = other.as<FragmentNode>();
             return fragment->IsEqual(other_node);
           })
      .def("tl.Fragment_thread_size",
           [](Fragment fragment) { return fragment->ThreadExtent(); })
      .def("tl.Fragment_thread",
           [](Fragment fragment) { return fragment->GetForwardThread(); })
      .def("tl.Fragment_repeat",
           [](Fragment fragment, Array<PrimExpr> repeats, bool repeat_on_thread,
              bool lower_dim_first) {
             return fragment->Repeat(repeats, repeat_on_thread,
                                     lower_dim_first);
           })
      .def("tl.Fragment_replicate",
           [](Fragment fragment, int repeats) {
             return fragment->Replicate(repeats);
           })
      .def("tl.Fragment_condense_rep_var",
           [](Fragment fragment) { return fragment->CondenseReplicateVar(); })
      .def("tl.make_swizzled_layout",
           [](int stride, int continuous, int element_size, bool k_inner,
              bool allow_pad = true) {
             if (allow_pad) {
               return makeGemmABLayout(stride, continuous, continuous,
                                       element_size, k_inner);
             } else {
               return makeGemmABLayoutHopper(stride, continuous, continuous,
                                             element_size, k_inner);
             }
           })
      .def("tl.make_volta_swizzled_layout",
           [](int stride, int mat_continuous, bool is_a, bool k_inner) {
             return makeGemmVoltaABLayout(stride, mat_continuous, is_a,
                                          k_inner);
           })
      .def("tl.make_wgmma_swizzled_layout",
           [](int stride, int mat_continuous, int continuity, int element_size,
              bool k_inner) {
             return makeGemmABLayoutHopper(stride, mat_continuous, continuity,
                                           element_size, k_inner);
           })
      .def("tl.make_tcgen05mma_swizzled_layout",
           [](int stride, int mat_continuous, int continuity, int element_size,
              bool k_inner) {
             return makeGemmABLayoutSm100(stride, mat_continuous, continuity,
                                          element_size, k_inner);
           })
      .def("tl.make_full_bank_swizzled_layout",
           [](int stride, int continuous, int element_size) {
             return makeFullBankSwizzleLayout(stride, continuous, element_size);
           })
      .def("tl.make_half_bank_swizzled_layout",
           [](int stride, int continuous, int element_size) {
             return makeHalfBankSwizzleLayout(stride, continuous, element_size);
           })
      .def("tl.make_quarter_bank_swizzled_layout",
           [](int stride, int continuous, int element_size) {
             return makeQuarterBankSwizzleLayout(stride, continuous,
                                                 element_size);
           })
      .def("tl.make_linear_layout", [](int stride, int continuous) {
        return makeGemmLayoutLinear(stride, continuous);
      });
}

TVM_FFI_STATIC_INIT_BLOCK() {
  namespace refl = tvm::ffi::reflection;
  LayoutNode::RegisterReflection();
  FragmentNode::RegisterReflection();
}

} // namespace tl
} // namespace tvm