/*!
 * \file tl/op/parallel.h
 * \brief Infer layouts from operators and parallel for loops
 */

#ifndef TVM_TL_OP_PARALLEL_H_
#define TVM_TL_OP_PARALLEL_H_

#include <tvm/target/target.h>
#include <tvm/tir/stmt_functor.h>

#include "../layout/layout.h"
#include "../transform/layout_reducer.h"
#include "./operator.h"

namespace tvm {
namespace tl {

using namespace tir;

// Thrown when layout inference derives conflicting layouts that cannot be
// reconciled.
class LayoutConflictException : public std::exception {
public:
  explicit LayoutConflictException(const std::string &msg) : msg_(msg) {}
  const char *what() const noexcept override { return msg_.c_str(); }

private:
  std::string msg_;
};
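
// Illustrative sketch (an assumption about caller behavior, not an API defined
// here): a pass that drives layout inference and lets this exception propagate
// can report the conflict as follows. `op`, `infer_args`, and `level` are
// hypothetical placeholders.
//
//   try {
//     op->InferLayout(infer_args, level);
//   } catch (const LayoutConflictException &e) {
//     LOG(FATAL) << "Layout conflict in parallel loop: " << e.what();
//   }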

// Try to prove, using `analyzer_`, that the accesses described by `small_frag`
// (indexed by `small_frag_indices`) are contained within those described by
// `large_frag` (indexed by `large_frag_indices`).
bool ProveFragmentContains(Fragment small_frag, Fragment large_frag,
                           Array<PrimExpr> small_frag_indices,
                           Array<PrimExpr> large_frag_indices,
                           arith::Analyzer &analyzer_);

class ParallelOpNode;

// Visitor over a parallel loop nest that records, on the owning
// ParallelOpNode, the loop variables it encounters and the indices used by
// each buffer load and store.
class ParallelLoopNestVisitor : public StmtExprVisitor {
private:
  explicit ParallelLoopNestVisitor(ParallelOpNode *op) : p(op) {}
  void VisitStmt_(const ForNode *op) override;
  void VisitStmt_(const BufferStoreNode *op) override;
  void VisitExpr_(const BufferLoadNode *op) override;

  ParallelOpNode *p;

  friend class ParallelOpNode;
};

// ParallelOpNode represents a parallel for loop operator in TileLang.
// It is responsible for inferring layouts, holding loop structure, and managing
// predicates.
class ParallelOpNode : public TileOperatorNode {
public:
  // The root For loop node.
  For root_;
  // The inferred layout for the loop, mutable to allow lazy inference.
  mutable Fragment loop_layout_;
  // The predicate expression for the loop, if any, mutable for lazy
  // construction.
  mutable Optional<PrimExpr> predicate_;

  // Type key for TVM object system.
  static constexpr const char *_type_key = "tl.ParallelOp";
  TVM_DECLARE_FINAL_OBJECT_INFO(ParallelOpNode, TileOperatorNode);

  static void RegisterReflection() {
    namespace refl = tvm::ffi::reflection;
    refl::ObjectDef<ParallelOpNode>()
        .def_ro("root", &ParallelOpNode::root_)
        .def_ro("loop_layout", &ParallelOpNode::loop_layout_)
        .def_ro("predicate", &ParallelOpNode::predicate_);
  }

  bool SEqualReduce(const ParallelOpNode *other, SEqualReducer equal) const {
    return equal(root_, other->root_) &&
           equal(loop_layout_, other->loop_layout_) &&
           equal(predicate_, other->predicate_);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(root_);
    hash_reduce(loop_layout_);
    hash_reduce(predicate_);
  }
  static constexpr bool _type_has_method_sequal_reduce = true;
  static constexpr bool _type_has_method_shash_reduce = true;

  // Construct from a root For loop.
  ParallelOpNode(For root);

  // Lower the operator to a TIR statement.
  Stmt Lower(const LowerArgs &T, arith::Analyzer *analyzer) const override;

  // Infer the layout for this parallel operator.
  LayoutMap InferLayout(const LayoutInferArgs &T,
                        InferLevel level) const override;

  // Copy constructor for ParallelOpNode.
  ParallelOpNode(const ParallelOpNode &other) : ParallelOpNode(other.root_) {
    loop_layout_ = other.loop_layout_;
    predicate_ = other.predicate_;
  }

  // Get the inferred loop layout.
  Fragment GetLoopLayout() const { return loop_layout_; }
  // Get the root For loop.
  For GetRoot() const { return root_; }
  // Get the mapping from buffer to access indices.
  Map<Buffer, Array<PrimExpr>> GetIndiceMap() const { return indice_map_; }
  // Get the predicate for a given thread variable.
  Optional<PrimExpr> GetPredicate(Var thread_var) const;

  // Clone this operator.
  TileOperator Clone() const override;

private:
  // Complete the fragment layout for a given buffer.
  Fragment CompleteBufferFragment(const Buffer &buffer) const;
  // Check if the buffer is accessed with common indices (i.e., loop variables).
  bool IsCommonAccessIndice(const Buffer &buffer) const;
  /*!
   * \brief Conjoin `expr` into the operator's predicate (logical AND). If no
   * predicate exists yet, `expr` becomes the predicate.
   *
   * \param expr Predicate expression to add.
   */
  void AddPredicate(const PrimExpr &expr) const {
    predicate_ = predicate_.defined() ? And(expr, predicate_.value()) : expr;
  }
  // Allow ParallelLoopNestVisitor to access private members.
  friend class ParallelLoopNestVisitor;

  // Visitor for collecting loop nest information.
  ParallelLoopNestVisitor V;
  // Mapping from each buffer to its access indices in the loop.
  Map<Buffer, Array<PrimExpr>> indice_map_;
  // Set of buffers that are written to in the loop.
  std::unordered_set<Buffer, ObjectPtrHash, ObjectPtrEqual> buffer_is_write_;
  // The loop variables for the parallel loop nest.
  Array<IterVar> loop_vars_;
  // Analyzer for simplifying and analyzing expressions, mutable for lazy use.
  mutable arith::Analyzer analyzer_;
  // Mapping from reducer variable to its ReducerInfo.
  Map<Var, ReducerInfo> reducer_info_map_;
};

// Managed reference wrapper around ParallelOpNode.
class ParallelOp : public TileOperator {
public:
  TVM_DEFINE_OBJECT_REF_METHODS(ParallelOp, TileOperator, ParallelOpNode);

  ParallelOp(const For &root) {
    auto op = make_object<ParallelOpNode>(root);
    data_ = std::move(op);
  }
};
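
// Illustrative usage sketch (an assumption about the surrounding lowering
// pipeline, not an API defined here): given a parallel `For` nest `root`, a
// pass can wrap it, run layout inference, and query the results.
// `infer_args`, `level`, `lower_args`, `analyzer`, and `thread_var` are
// hypothetical placeholders owned by the caller.
//
//   ParallelOp op(root);
//   LayoutMap layouts = op->InferLayout(infer_args, level);
//   Fragment loop_layout = op->GetLoopLayout();
//   Optional<PrimExpr> pred = op->GetPredicate(thread_var);
//   Stmt lowered = op->Lower(lower_args, &analyzer);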

} // namespace tl
} // namespace tvm

#endif // TVM_TL_OP_PARALLEL_H_