Commit a27dd28c authored by shivadbhavsar, committed by GitHub

Renamed module variables from p to m (#1204)

Updated variable names according to #1193
parent 8c94ad07
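
For context, this commit is a pure rename: each compiler pass operates on a `module`, and the parameter is now spelled `m` rather than `p` (a leftover from when passes took a `program`). Below is a minimal sketch of what a pass looks like under the new convention; the pass name `example_pass` and its body are illustrative only and are not part of this commit.

```cpp
// Illustrative only: a hypothetical pass showing the naming convention this
// commit adopts. It is not part of the MIGraphX source tree.
#include <migraphx/module.hpp>
#include <migraphx/iterator_for.hpp>
#include <string>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct example_pass
{
    std::string name() const { return "gpu::example_pass"; }

    // The module being transformed is now named `m` (module), not `p`,
    // which previously read as if the argument were a program.
    void apply(module& m) const
    {
        for(auto ins : iterator_for(m))
        {
            // A real pass would rewrite instructions here, e.g. via
            // m.insert_instruction(...) or m.replace_instruction(...).
            (void)ins;
        }
    }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
```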
@@ -15,7 +15,7 @@ namespace gpu {
 struct sync_device
 {
     std::string name() const { return "sync_device"; }
-    void apply(module& p) const;
+    void apply(module& m) const;
 };
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
...

@@ -14,7 +14,7 @@ struct write_literals
     context* ctx = nullptr;
     std::string name() const { return "gpu::write_literals"; }
-    void apply(module& p) const;
+    void apply(module& m) const;
 };
 } // namespace gpu
...

@@ -77,28 +77,28 @@ MIGRAPHX_REGISTER_OP(wait_event)
 MIGRAPHX_REGISTER_OP(set_stream)
 std::size_t schedule_model::concurrency() const { return streams; }
-void schedule_model::sched(module& p, instruction_ref ins, std::size_t n) const
+void schedule_model::sched(module& m, instruction_ref ins, std::size_t n) const
 {
     auto last_stream = std::find_if(std::make_reverse_iterator(ins),
-                                    std::make_reverse_iterator(p.begin()),
+                                    std::make_reverse_iterator(m.begin()),
                                     [&](auto&& i) { return i.name() == "gpu::set_stream"; });
-    if(last_stream != std::make_reverse_iterator(p.begin()))
+    if(last_stream != std::make_reverse_iterator(m.begin()))
     {
         auto&& op = any_cast<set_stream>(last_stream->get_operator());
         // If the same stream was set earlier then skip
         if(op.stream == n)
             return;
     }
-    p.insert_instruction(ins, set_stream{n});
+    m.insert_instruction(ins, set_stream{n});
 }
-void schedule_model::wait(module& p, instruction_ref ins, std::size_t wait_id) const
+void schedule_model::wait(module& m, instruction_ref ins, std::size_t wait_id) const
 {
-    p.insert_instruction(ins, wait_event{wait_id});
+    m.insert_instruction(ins, wait_event{wait_id});
 }
-void schedule_model::record(module& p, instruction_ref ins, std::size_t wait_id) const
+void schedule_model::record(module& m, instruction_ref ins, std::size_t wait_id) const
 {
-    p.insert_instruction(std::next(ins), record_event{wait_id});
+    m.insert_instruction(std::next(ins), record_event{wait_id});
 }
 static std::unordered_map<std::string, std::size_t> create_weight_map()
...

@@ -8,9 +8,9 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {
-void sync_device::apply(module& p) const
+void sync_device::apply(module& m) const
 {
-    auto last = std::prev(p.end());
+    auto last = std::prev(m.end());
     if(last->name() == "@return")
     {
         auto inputs = last->inputs();
@@ -18,10 +18,10 @@ void sync_device::apply(module& p) const
                return (i->name() == "hip::copy_from_gpu");
            }))
         {
-            auto sync_in = p.insert_instruction(last, make_op("hip::sync_stream"), inputs);
+            auto sync_in = m.insert_instruction(last, make_op("hip::sync_stream"), inputs);
             if(not inputs.empty())
             {
-                p.replace_instruction(inputs.front(), sync_in);
+                m.replace_instruction(inputs.front(), sync_in);
             }
         }
     }
...

@@ -11,25 +11,25 @@ namespace gpu {
 MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_COPY_LITERALS)
-void write_literals::apply(module& p) const
+void write_literals::apply(module& m) const
 {
     assert(ctx != nullptr);
     std::size_t n = 0;
-    for(auto ins : iterator_for(p))
+    for(auto ins : iterator_for(m))
     {
         if(ins->name() == "@literal")
         {
             if(enabled(MIGRAPHX_COPY_LITERALS{}))
             {
                 literal l = ins->get_literal();
-                auto pre = p.add_literal(l);
-                auto alloc = p.insert_instruction(std::next(pre), hip_allocate{l.get_shape()});
-                p.replace_instruction(ins, hip_copy_to_gpu{}, pre, alloc);
+                auto pre = m.add_literal(l);
+                auto alloc = m.insert_instruction(std::next(pre), hip_allocate{l.get_shape()});
+                m.replace_instruction(ins, hip_copy_to_gpu{}, pre, alloc);
             }
             else
             {
-                std::string id = p.name() + ":@literal:" + std::to_string(n);
-                p.replace_instruction(ins, hip_copy_literal{ins->get_literal(), id});
+                std::string id = m.name() + ":@literal:" + std::to_string(n);
+                m.replace_instruction(ins, hip_copy_literal{ins->get_literal(), id});
                 n++;
             }
         }
...