/*!
 * \file tl/op/parallel.h
 * \brief Layout inference for operators and parallel for loops.
 */

#ifndef TVM_TL_OP_PARALLEL_H_
#define TVM_TL_OP_PARALLEL_H_

#include <tvm/target/target.h>
#include <tvm/tir/stmt_functor.h>

#include <exception>
#include <string>
#include <unordered_set>

#include "../layout/layout.h"
#include "../transform/layout_reducer.h"
#include "./operator.h"

namespace tvm {
namespace tl {

using namespace tir;

// Thrown when layout inference encounters conflicting layout requirements.
class LayoutConflictException : public std::exception {
public:
  explicit LayoutConflictException(const std::string &msg) : msg_(msg) {}
  const char *what() const noexcept override { return msg_.c_str(); }

private:
  std::string msg_;
};

// Prove that the accesses of `small_frag` at `small_frag_indices` are
// contained within `large_frag` at `large_frag_indices`, using the given
// analyzer for simplification.
bool ProveFragmentContains(Fragment small_frag, Fragment large_frag,
                           Array<PrimExpr> small_frag_indices,
                           Array<PrimExpr> large_frag_indices,
                           arith::Analyzer &analyzer_);

class ParallelOpNode;

// Visitor that walks a parallel loop nest and records, into its owning
// ParallelOpNode, the loop variables and per-buffer access indices.
class ParallelLoopNestVisitor : public StmtExprVisitor {
private:
  explicit ParallelLoopNestVisitor(ParallelOpNode *op) : p(op) {}
  void VisitStmt_(const ForNode *op) override;
  void VisitStmt_(const BufferStoreNode *op) override;
  void VisitExpr_(const BufferLoadNode *op) override;

  // The ParallelOpNode that owns this visitor and receives the collected info.
  ParallelOpNode *p;

  friend class ParallelOpNode;
};

// ParallelOpNode represents a parallel for loop operator in TileLang.
// It is responsible for inferring layouts, holding loop structure, and managing
// predicates.
class ParallelOpNode : public TileOperatorNode {
public:
  // The root For loop node.
  For root_;
  // The inferred layout for the loop, mutable to allow lazy inference.
  mutable Fragment loop_layout_;
  // The predicate expression for the loop, if any, mutable for lazy
  // construction.
  mutable Optional<PrimExpr> predicate_;

  // Type key for TVM object system.
  TVM_FFI_DECLARE_OBJECT_INFO_FINAL("tl.ParallelOp", ParallelOpNode,
                                    TileOperatorNode);

  static void RegisterReflection() {
    namespace refl = tvm::ffi::reflection;
    refl::ObjectDef<ParallelOpNode>()
        .def_ro("root", &ParallelOpNode::root_)
        .def_ro("loop_layout", &ParallelOpNode::loop_layout_)
        .def_ro("predicate", &ParallelOpNode::predicate_);
  }
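
  // Illustrative: the fields registered above surface as read-only attributes
  // through TVM's FFI reflection; `op` below is a hypothetical Python handle
  // to a tl.ParallelOp node.
  //
  //   op.root         # the wrapped For loop
  //   op.loop_layout  # the inferred Fragment, once available
  //   op.predicate    # the optional predicate expression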

  // Construct from a root For loop.
  ParallelOpNode(For root);

  // Lower the operator to a TIR statement.
  Stmt Lower(const LowerArgs &T, arith::Analyzer *analyzer) const override;

  // Infer the layout for this parallel operator.
  LayoutMap InferLayout(const LayoutInferArgs &T,
                        InferLevel level) const override;

  // Copy constructor for ParallelOpNode.
  ParallelOpNode(const ParallelOpNode &other) : ParallelOpNode(other.root_) {
    loop_layout_ = other.loop_layout_;
    predicate_ = other.predicate_;
  }

  // Get the inferred loop layout.
  Fragment GetLoopLayout() const { return loop_layout_; }
  // Get the root For loop.
  For GetRoot() const { return root_; }
  // Get the mapping from buffer to access indices.
  Map<Buffer, Array<PrimExpr>> GetIndiceMap() const { return indice_map_; }
  // Get the predicate for a given thread variable.
  Optional<PrimExpr> GetPredicate(Var thread_var) const;

  // Clone this operator.
  TileOperator Clone() const override;

private:
  // Complete the fragment layout for a given buffer.
  Fragment CompleteBufferFragment(const Buffer &buffer) const;
  // Check if the buffer is accessed with common indices (i.e., loop variables).
  bool IsCommonAccessIndice(const Buffer &buffer) const;

  /**
   * Conjoin `expr` into the operator's predicate (logical AND). If no
   * predicate exists yet, `expr` becomes the predicate.
   *
   * @param expr Predicate expression to add.
   */
  void AddPredicate(const PrimExpr &expr) const {
    predicate_ = predicate_.defined() ? And(expr, predicate_.value()) : expr;
  }
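
  // Illustrative: successive calls conjoin conditions, newest first; `i`, `n`,
  // and `tx` are placeholder expressions.
  //
  //   AddPredicate(i < n);     // predicate_ = (i < n)
  //   AddPredicate(tx < 128);  // predicate_ = (tx < 128) && (i < n)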

  // Allow ParallelLoopNestVisitor to access private members.
  friend class ParallelLoopNestVisitor;

  // Visitor for collecting loop nest information.
  ParallelLoopNestVisitor V;
  // Mapping from each buffer to its access indices in the loop.
  Map<Buffer, Array<PrimExpr>> indice_map_;
  // Set of buffers that are written to in the loop.
  std::unordered_set<Buffer, ObjectPtrHash, ObjectPtrEqual> buffer_is_write_;
  // The loop variables for the parallel loop nest.
  Array<IterVar> loop_vars_;
  // Iteration variables of loops nested inside the parallel loop nest, keyed
  // by their loop Var.
  Map<Var, IterVar> inner_vars_;
  // Analyzer for simplifying and analyzing expressions, mutable for lazy use.
  mutable arith::Analyzer analyzer_;
  // Mapping from buffer variable to its reducer info.
  Map<Var, ReducerInfo> reducer_info_map_;
};

// Managed reference to ParallelOpNode.
class ParallelOp : public TileOperator {
public:
  TVM_FFI_DEFINE_OBJECT_REF_METHODS_NULLABLE(ParallelOp, TileOperator,
                                             ParallelOpNode);

  // Construct a ParallelOp from the root For loop of a parallel nest.
  ParallelOp(const For &root) {
    auto op = tvm::ffi::make_object<ParallelOpNode>(root);
    data_ = std::move(op);
  }
};
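
// Illustrative sketch of how the surrounding pass might drive this operator;
// `loop`, `infer_args`, `level`, `thread_var`, `lower_args`, and `analyzer`
// are stand-ins for values owned by that pass.
//
//   ParallelOp op(loop);                         // wrap the root For nest
//   LayoutMap updates = op->InferLayout(infer_args, level);
//   Fragment frag = op->GetLoopLayout();         // inferred loop layout
//   Optional<PrimExpr> pred = op->GetPredicate(thread_var);
//   Stmt lowered = op->Lower(lower_args, &analyzer);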

} // namespace tl
} // namespace tvm

#endif // TVM_TL_OP_PARALLEL_H_