Unverified Commit 2466dd6f authored by Shucai Xiao, committed by GitHub

Refactor program to module (#684)



* code backup

* clang format

* change corresponding tool files

* clang format
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent de10423f
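
The change applied across the headers below follows a single pattern: drop the heavyweight #include <migraphx/program.hpp>, forward-declare program, and introduce a transitional alias so that passes can be written against module while module is still just another name for program. A minimal sketch of that header shape (the file and pass names here are illustrative, not part of this diff):

// example_pass.hpp -- illustrative sketch of the post-refactor header pattern
#ifndef EXAMPLE_PASS_HPP
#define EXAMPLE_PASS_HPP
#include <string>
#include <migraphx/config.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

struct program;          // forward declaration instead of including program.hpp
using module = program;  // transitional alias: a module is, for now, a program

struct example_pass
{
    std::string name() const { return "example_pass"; }
    void apply(module& m) const; // passes now take a module
};

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif

The matching .cpp then includes <migraphx/program.hpp> itself (as the lowering sources below do), so the full definition is only pulled in where the pass actually walks instructions.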
 #ifndef MIGRAPHX_GUARD_RTGLIB_CPU_LOWERING_HPP
 #define MIGRAPHX_GUARD_RTGLIB_CPU_LOWERING_HPP
-#include <migraphx/program.hpp>
+#include <string>
 #include <migraphx/config.hpp>
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
+struct program;
+using module = program;
 namespace cpu {
 struct lowering
 {
     std::string name() const { return "cpu::lowering"; }
-    void apply(program& p) const;
+    void apply(module& p) const;
 };
 } // namespace cpu
......
@@ -27,6 +27,7 @@
 #include <migraphx/cpu/gemm.hpp>
 #include <migraphx/register_op.hpp>
 #include <migraphx/make_op.hpp>
+#include <migraphx/program.hpp>
 #include <unordered_map>
 #include <utility>
 #include <iostream>
@@ -882,7 +883,7 @@ MIGRAPHX_REGISTER_OP(cpu_rnn_var_sl_last_output)
 struct cpu_apply
 {
-    program* prog;
+    module* prog;
     std::unordered_map<std::string, std::function<void(instruction_ref)>> apply_map{};
     template <class T>
@@ -967,7 +968,7 @@ struct cpu_apply
     }
 };
-void lowering::apply(program& p) const { cpu_apply{&p}.apply(); }
+void lowering::apply(module& p) const { cpu_apply{&p}.apply(); }
 } // namespace cpu
 } // namespace MIGRAPHX_INLINE_NS
......
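Both cpu_apply above and miopen_apply further down use the same dispatch-map idea: the struct holds a pointer to the module being lowered plus an apply_map keyed by operator name, and apply() walks the instructions and runs whichever handler is registered for each one. A self-contained, stand-in sketch of that pattern (the container and types are simplified stand-ins, not the MIGraphX API):

#include <functional>
#include <list>
#include <string>
#include <unordered_map>

struct instruction { std::string op_name; };
using module_t = std::list<instruction>;     // stand-in for migraphx::module
using instruction_ref = module_t::iterator;  // stand-in for migraphx::instruction_ref

struct apply_pass
{
    module_t* mod; // the module being lowered
    std::unordered_map<std::string, std::function<void(instruction_ref)>> apply_map{};

    void init()
    {
        // one handler per operator name; real handlers replace the instruction
        apply_map["dot"] = [](instruction_ref ins) { ins->op_name = "cpu::dot"; };
    }

    void apply()
    {
        init();
        for(auto it = mod->begin(); it != mod->end(); ++it)
        {
            auto found = apply_map.find(it->op_name);
            if(found != apply_map.end())
                found->second(it); // rewrite the matched instruction in place
        }
    }
};

The pass entry point then just wires the two together, exactly as in the diff: lowering::apply(module& p) constructs the helper with &p and calls apply().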
@@ -7,7 +7,7 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {
-void adjust_allocation::apply(program& p) const
+void adjust_allocation::apply(module& p) const
 {
     for(auto ins : iterator_for(p))
     {
......
@@ -28,7 +28,7 @@ struct hip_stream_model
     bool is_wait(migraphx::instruction_ref ins) const { return ins->name() == "gpu::wait_event"; }
 };
-stream_model make_stream_model(const program& p)
+stream_model make_stream_model(const module& p)
 {
     hip_stream_model m;
     std::size_t stream = 0;
@@ -49,7 +49,7 @@ stream_model make_stream_model(const program& p)
     return m;
 }
-std::vector<stream_race> analyze_streams(const program& p)
+std::vector<stream_race> analyze_streams(const module& p)
 {
     return migraphx::analyze_streams(p, make_stream_model(p));
 }
......
@@ -11,7 +11,7 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {
-void eliminate_workspace::apply(program& p) const
+void eliminate_workspace::apply(module& p) const
 {
     std::size_t n = 0;
     std::vector<instruction_ref> allocs;
......
@@ -322,7 +322,7 @@ struct find_layernorm
     auto matcher() const { return layernorm_onnx(); }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto ins = r.result;
         auto x_ins = r.instructions["x"];
@@ -368,7 +368,7 @@ struct find_gelu
         return match::unordered_tree("gpu::mul", one_half(), add_erf(), match::any());
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto ins = r.result;
         auto x_ins = r.instructions["x"];
@@ -385,7 +385,7 @@ struct find_add_gelu
         return match::name("gpu::gelu")(match::arg(0)(match::name("gpu::add").bind("add")));
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto add_ins = r.instructions["add"];
         auto ins = r.result;
@@ -429,7 +429,7 @@ struct find_gelu_new
             match::either_arg(0, 1)(match::args(match::has_value(0.5f)), tanh_fn()))))));
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto ins = r.result;
         auto x_ins = r.instructions["x"];
@@ -449,7 +449,7 @@ struct find_add_gelu_new
         return match::name("gpu::gelu_new")(match::arg(0)(match::name("gpu::add").bind("add")));
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto add_ins = r.instructions["add"];
         auto ins = r.result;
@@ -473,7 +473,7 @@ struct find_add_clip
                 .bind("add")));
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto add_ins = r.instructions["add"];
         auto ins = r.result;
@@ -508,7 +508,7 @@ struct find_add_unary
                 .bind("add")));
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto add_ins = r.instructions["add"];
         auto ins = r.result;
@@ -536,7 +536,7 @@ struct find_triadd
                 .bind("input")));
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto add_ins = r.instructions["add"];
         auto input_ins = r.instructions["input"];
@@ -563,7 +563,7 @@ struct find_mul_add
             match::name("gpu::mul")(match::used_once()).bind("mul"), match::any().bind("b")));
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto mul_ins = r.instructions["mul"];
         auto b_ins = r.instructions["b"];
@@ -588,7 +588,7 @@ struct find_mul_add_relu
             match::arg(0)(match::name("gpu::mul_add")(match::used_once()).bind("mul_add")));
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto mul_add_ins = r.instructions["mul_add"];
         auto ins = r.result;
@@ -704,7 +704,7 @@ auto conv_bias(Ms... ms)
 }
 template <class Op>
-void apply_conv_bias(context& ctx, program& p, match::matcher_result r)
+void apply_conv_bias(context& ctx, module& p, match::matcher_result r)
 {
     auto conv_ins = r.instructions["conv"];
     auto bias_ins = r.instructions["bias"];
@@ -731,7 +731,7 @@ struct find_conv_bias
             match::output(match::name(std::unordered_set<std::string>{"gpu::relu"}))));
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         apply_conv_bias<miopen_conv_bias>(*ctx, p, std::move(r));
     }
@@ -742,7 +742,7 @@ struct find_conv_bias_relu
     context* ctx = nullptr;
     auto matcher() const { return match::name("gpu::relu")(match::arg(0)(conv_bias())); }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         apply_conv_bias<miopen_conv_bias_relu>(*ctx, p, std::move(r));
     }
@@ -758,7 +758,7 @@ struct find_gemm_add
             match::name("gpu::gemm")(match::nargs(3)).bind("gemm")));
     }
-    void apply(program& p, match::matcher_result r) const
+    void apply(module& p, match::matcher_result r) const
     {
         auto ins = r.result;
         auto gemm_ins = r.instructions["gemm"];
@@ -800,7 +800,7 @@ struct find_commutative_broadcast
         return match::name("gpu::add", "gpu::mul")(match::arg(1)(match::broadcast_shape()));
     }
-    void apply(program& p, const match::matcher_result& r) const
+    void apply(module& p, const match::matcher_result& r) const
     {
         auto ins = r.result;
         auto args = ins->inputs();
@@ -810,7 +810,7 @@ struct find_commutative_broadcast
     }
 };
-void fuse_ops::apply(program& p) const
+void fuse_ops::apply(module& p) const
 {
     match::find_matches(p, find_gelu{}, find_gelu_new{fast_math});
     run_passes(p, {dead_code_elimination{}});
......
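All of the fusion matchers changed above share one shape: a small finder struct pairs a matcher() describing an instruction pattern with an apply(module&, match::matcher_result) that rewrites the matched instructions, and fuse_ops::apply feeds the finders to match::find_matches before cleaning up with dead_code_elimination. A stripped-down stand-in of that shape (the types, finder name, and driver are illustrative simplifications, not the real match:: library):

#include <list>
#include <string>

struct instruction { std::string op_name; };
using module_t = std::list<instruction>;
using instruction_ref = module_t::iterator;

struct matcher_result { instruction_ref result; };

// A finder couples a predicate (matcher) with a rewrite (apply).
struct find_relu
{
    bool matcher(const instruction& ins) const { return ins.op_name == "gpu::relu"; }
    void apply(module_t&, matcher_result r) const { r.result->op_name = "gpu::fused_relu"; }
};

// Simplified driver: visit each instruction and run every finder whose matcher fires.
template <class... Finders>
void find_matches(module_t& m, Finders... finders)
{
    for(auto it = m.begin(); it != m.end(); ++it)
        (..., (finders.matcher(*it) ? finders.apply(m, matcher_result{it}) : void()));
}

Because the finders only see the module through apply()'s parameter, renaming that parameter's type from program to module is the entire change needed in each of them.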
@@ -13,7 +13,7 @@ namespace gpu {
 struct adjust_allocation
 {
     std::string name() const { return "gpu::adjust_allocation"; }
-    void apply(program& p) const;
+    void apply(module& p) const;
 };
 } // namespace gpu
......
@@ -6,9 +6,13 @@
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
+struct program;
+using module = program;
 namespace gpu {
-std::vector<stream_race> analyze_streams(const program& p);
+std::vector<stream_race> analyze_streams(const module& p);
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
......
@@ -8,13 +8,14 @@
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 struct program;
+using module = program;
 namespace gpu {
 struct eliminate_workspace
 {
     std::string name() const { return "eliminate_workspace"; }
-    void apply(program& p) const;
+    void apply(module& p) const;
 };
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
......
 #ifndef MIGRAPHX_GUARD_RTGLIB_FUSE_OPS_HPP
 #define MIGRAPHX_GUARD_RTGLIB_FUSE_OPS_HPP
-#include <migraphx/program.hpp>
 #include <migraphx/config.hpp>
 #include <migraphx/gpu/context.hpp>
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
+struct program;
+using module = program;
 namespace gpu {
 struct fuse_ops
@@ -15,7 +17,7 @@ struct fuse_ops
     context* ctx = nullptr;
     bool fast_math = true;
     std::string name() const { return "gpu::fuse_ops"; }
-    void apply(program& p) const;
+    void apply(module& p) const;
 };
 } // namespace gpu
......
 #ifndef MIGRAPHX_GUARD_RTGLIB_MIOPEN_LOWERING_HPP
 #define MIGRAPHX_GUARD_RTGLIB_MIOPEN_LOWERING_HPP
-#include <migraphx/program.hpp>
 #include <migraphx/config.hpp>
 #include <migraphx/gpu/context.hpp>
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
+struct program;
+using module = program;
 namespace gpu {
 struct lowering
 {
     context* ctx;
     bool offload_copy;
     std::string name() const { return "gpu::lowering"; }
-    void apply(program& p) const;
+    void apply(module& p) const;
 };
 } // namespace gpu
......
@@ -13,7 +13,7 @@ namespace gpu {
 struct pack_int8_args
 {
     std::string name() const { return "gpu::pack_int8_args"; }
-    void apply(program& p) const;
+    void apply(module& p) const;
     shape pack_int8_shape(const shape& s) const;
 };
......
@@ -9,6 +9,7 @@
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 struct program;
+using module = program;
 namespace gpu {
@@ -17,7 +18,7 @@ struct preallocate_param
     std::string param{};
     context* ctx = nullptr;
     std::string name() const { return "preallocate_param"; }
-    void apply(program& p) const;
+    void apply(module& p) const;
 };
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
......
@@ -9,6 +9,7 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 struct program;
+using module = program;
 struct operation;
 namespace gpu {
@@ -17,9 +18,9 @@ struct schedule_model
 {
     std::size_t streams = 0;
     std::size_t concurrency() const;
-    void sched(program& p, instruction_ref ins, std::size_t n) const;
-    void wait(program& p, instruction_ref ins, std::size_t wait_id) const;
-    void record(program& p, instruction_ref ins, std::size_t wait_id) const;
+    void sched(module& p, instruction_ref ins, std::size_t n) const;
+    void wait(module& p, instruction_ref ins, std::size_t wait_id) const;
+    void record(module& p, instruction_ref ins, std::size_t wait_id) const;
     std::size_t weight(const operation& op) const;
 };
......
@@ -9,13 +9,14 @@
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 struct program;
+using module = program;
 namespace gpu {
 struct sync_device
 {
     std::string name() const { return "sync_device"; }
-    void apply(program& p) const;
+    void apply(module& p) const;
 };
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
......
 #ifndef MIGRAPHX_GUARD_RTGLIB_MIOPEN_WRITE_LITERALS_HPP
 #define MIGRAPHX_GUARD_RTGLIB_MIOPEN_WRITE_LITERALS_HPP
-#include <migraphx/program.hpp>
 #include <migraphx/gpu/context.hpp>
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
+struct program;
+using module = program;
 namespace gpu {
@@ -14,7 +15,7 @@ struct write_literals
     context* ctx = nullptr;
     std::string name() const { return "gpu::write_literals"; }
-    void apply(program& p) const;
+    void apply(module& p) const;
 };
 } // namespace gpu
......
@@ -34,6 +34,7 @@
 #include <migraphx/gpu/quant_convolution.hpp>
 #include <migraphx/gpu/rocblas.hpp>
 #include <migraphx/iterator_for.hpp>
+#include <migraphx/program.hpp>
 #include <utility>
 #include <functional>
 #include <algorithm>
@@ -44,7 +45,7 @@ namespace gpu {
 struct miopen_apply
 {
-    program* prog = nullptr;
+    module* prog = nullptr;
     const lowering* pass = nullptr;
     std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
     instruction_ref last{};
@@ -395,7 +396,7 @@ struct miopen_apply
     }
 };
-void lowering::apply(program& p) const { miopen_apply{&p, this}.apply(); }
+void lowering::apply(module& p) const { miopen_apply{&p, this}.apply(); }
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
@@ -10,7 +10,7 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {
-void pack_int8_args::apply(program& p) const
+void pack_int8_args::apply(module& p) const
 {
     for(auto ins : iterator_for(p))
     {
......
@@ -10,7 +10,7 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {
-void preallocate_param::apply(program& p) const
+void preallocate_param::apply(module& p) const
 {
     for(auto ins : iterator_for(p))
     {
......
@@ -77,7 +77,7 @@ MIGRAPHX_REGISTER_OP(wait_event)
 MIGRAPHX_REGISTER_OP(set_stream)
 std::size_t schedule_model::concurrency() const { return streams; }
-void schedule_model::sched(program& p, instruction_ref ins, std::size_t n) const
+void schedule_model::sched(module& p, instruction_ref ins, std::size_t n) const
 {
     auto last_stream = std::find_if(std::make_reverse_iterator(ins),
                                     std::make_reverse_iterator(p.begin()),
@@ -92,11 +92,11 @@ void schedule_model::sched(program& p, instruction_ref ins, std::size_t n) const
     p.insert_instruction(ins, set_stream{n});
 }
-void schedule_model::wait(program& p, instruction_ref ins, std::size_t wait_id) const
+void schedule_model::wait(module& p, instruction_ref ins, std::size_t wait_id) const
 {
     p.insert_instruction(ins, wait_event{wait_id});
 }
-void schedule_model::record(program& p, instruction_ref ins, std::size_t wait_id) const
+void schedule_model::record(module& p, instruction_ref ins, std::size_t wait_id) const
 {
     p.insert_instruction(std::next(ins), record_event{wait_id});
 }
......
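For context on the schedule_model changes above: all three hooks work by splicing stream-control instructions into the module, with sched inserting a set_stream before the instruction, wait inserting a wait_event before it, and record inserting a record_event after it. A stand-in sketch of that insertion pattern (the list-based module and instruction types are illustrative, not the MIGraphX API):

#include <cstddef>
#include <iterator>
#include <list>
#include <string>

struct instruction { std::string op_name; std::size_t id; };
using module_t = std::list<instruction>;
using instruction_ref = module_t::iterator;

// Mirrors p.insert_instruction(ins, set_stream{n}): the new instruction goes before ins.
void sched(module_t& m, instruction_ref ins, std::size_t n)
{
    m.insert(ins, instruction{"gpu::set_stream", n});
}

void wait(module_t& m, instruction_ref ins, std::size_t wait_id)
{
    m.insert(ins, instruction{"gpu::wait_event", wait_id});
}

// record goes after ins, hence the std::next, matching the diff above.
void record(module_t& m, instruction_ref ins, std::size_t wait_id)
{
    m.insert(std::next(ins), instruction{"gpu::record_event", wait_id});
}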