Commit d45c9d8f authored by Khalique's avatar Khalique
Browse files

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into rm_identity

parents 281532ab 2b0b77f1
#include <migraphx/gpu/schedule_model.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operation.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// Synchronization operator: records the event with id `event` on the
// context's currently selected stream. Produces no data (empty shape and
// empty argument) — it exists purely as a scheduling marker.
struct record_event
{
    std::size_t event = 0; // id of the event to record

    // Reflection hook so the operator can be serialized/compared by field.
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.event, "event"));
    }
    std::string name() const { return "gpu::record_event"; }
    // Synchronization ops carry no data, so the output shape is empty.
    shape compute_shape(const std::vector<shape>&) const { return {}; }
    argument compute(context& ctx, const shape&, const std::vector<argument>&) const
    {
        // Record this op's event on the current stream.
        ctx.get_stream().record(ctx.get_event(event));
        return {};
    }
    void finalize(context& ctx, const shape&, const std::vector<shape>&)
    {
        // NOTE(review): presumably ensures events up to id `event` are
        // allocated before compute runs — verify against context::create_events.
        ctx.create_events(event);
    }
};
// Synchronization operator: makes the context's current stream wait until
// the event with id `event` has been recorded (by a matching
// gpu::record_event). Produces no data.
struct wait_event
{
    std::size_t event = 0; // id of the event to wait on

    // Reflection hook so the operator can be serialized/compared by field.
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.event, "event"));
    }
    std::string name() const { return "gpu::wait_event"; }
    // Synchronization ops carry no data, so the output shape is empty.
    shape compute_shape(const std::vector<shape>&) const { return {}; }
    argument compute(context& ctx, const shape&, const std::vector<argument>&) const
    {
        // Block the current stream on this op's event.
        ctx.get_stream().wait(ctx.get_event(event));
        return {};
    }
};
// Scheduling operator: selects which stream subsequent instructions run on.
// Produces no data.
struct set_stream
{
    std::size_t stream = 0; // index of the stream to make current

    // Reflection hook so the operator can be serialized/compared by field.
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.stream, "stream"));
    }
    std::string name() const { return "gpu::set_stream"; }
    // Synchronization ops carry no data, so the output shape is empty.
    shape compute_shape(const std::vector<shape>&) const { return {}; }
    argument compute(context& ctx, const shape&, const std::vector<argument>&) const
    {
        ctx.set_stream(stream);
        return {};
    }
    // The stream is also set at finalize time — NOTE(review): presumably so
    // later ops' finalize steps run against the correct stream; confirm.
    void finalize(context& ctx, const shape&, const std::vector<shape>&) { ctx.set_stream(stream); }
};
std::size_t schedule_model::concurrency() const { return streams; }
/// Assign instruction `ins` to stream `n` by inserting a gpu::set_stream
/// before it, unless stream `n` is already the active stream at that point.
void schedule_model::sched(program& p, instruction_ref ins, std::size_t n) const
{
    // Walk backwards from ins looking for the most recent stream switch.
    auto rfirst = std::make_reverse_iterator(ins);
    auto rlast  = std::make_reverse_iterator(p.begin());
    auto prev_set =
        std::find_if(rfirst, rlast, [](auto&& i) { return i.name() == "gpu::set_stream"; });
    if(prev_set != rlast)
    {
        auto&& prev_op = any_cast<set_stream>(prev_set->get_operator());
        // Stream n is already active here; avoid a redundant set_stream.
        if(prev_op.stream == n)
            return;
    }
    p.insert_instruction(ins, set_stream{n});
}
/// Insert a gpu::wait_event before `ins` so its stream blocks until the
/// event identified by `wait_id` has been recorded.
void schedule_model::wait(program& p, instruction_ref ins, std::size_t wait_id) const
{
    wait_event w{wait_id};
    p.insert_instruction(ins, w);
}
/// Insert a gpu::record_event immediately after `ins` so the event
/// identified by `wait_id` fires once `ins` has executed.
void schedule_model::record(program& p, instruction_ref ins, std::size_t wait_id) const
{
    record_event r{wait_id};
    p.insert_instruction(std::next(ins), r);
}
// Build the per-operator weight table used by the scheduler. Heavier ops
// (convolution, gemm) get larger weights; free ops (literals, allocation)
// get zero.
static std::unordered_map<std::string, std::size_t> create_weight_map()
{
    std::unordered_map<std::string, std::size_t> weights;
    weights.emplace("hip::load_literal", 0);
    weights.emplace("hip::allocate", 0);
    weights.emplace("gpu::convolution", 4);
    weights.emplace("gpu::conv_bias_relu", 4);
    weights.emplace("gpu::pooling", 2);
    weights.emplace("gpu::gemm", 2);
    weights.emplace("gpu::concat", 1);
    weights.emplace("hip::add_relu", 2);
    return weights;
}
// Lazily-initialized singleton accessor for the weight table; built once
// on first use and shared thereafter.
static const std::unordered_map<std::string, std::size_t>& weight_map()
{
    static const std::unordered_map<std::string, std::size_t> table = create_weight_map();
    return table;
}
/// Return the scheduling weight for `op`; operators not present in the
/// weight table default to 1.
std::size_t schedule_model::weight(const operation& op) const
{
    // Single hash lookup instead of the previous count() + at() pair,
    // which looked the key up twice.
    const auto& wm = weight_map();
    auto it        = wm.find(op.name());
    return it == wm.end() ? 1 : it->second;
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
#include <migraphx/eliminate_concat.hpp> #include <migraphx/eliminate_concat.hpp>
#include <migraphx/eliminate_identity.hpp> #include <migraphx/eliminate_identity.hpp>
#include <migraphx/gpu/concat_gpu_opt.hpp> #include <migraphx/gpu/concat_gpu_opt.hpp>
#include <migraphx/gpu/schedule_model.hpp>
#include <migraphx/schedule.hpp>
namespace migraphx { namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
...@@ -53,7 +55,9 @@ std::vector<pass> target::get_passes(migraphx::context& gctx) const ...@@ -53,7 +55,9 @@ std::vector<pass> target::get_passes(migraphx::context& gctx) const
fuse_ops{&ctx}, fuse_ops{&ctx},
dead_code_elimination{}, dead_code_elimination{},
write_literals{&ctx}, write_literals{&ctx},
schedule{gpu::schedule_model{ctx.get_current_device().nstreams()}},
memory_coloring{"hip::allocate"}, memory_coloring{"hip::allocate"},
dead_code_elimination{},
eliminate_workspace{}, eliminate_workspace{},
eliminate_allocation{"hip::allocate"}, eliminate_allocation{"hip::allocate"},
check_context<context>{}, check_context<context>{},
......
...@@ -36,6 +36,18 @@ inline std::ostream& operator<<(std::ostream& s, std::nullptr_t) ...@@ -36,6 +36,18 @@ inline std::ostream& operator<<(std::ostream& s, std::nullptr_t)
return s; return s;
} }
// Stream a vector as "{ a, b, c, }" so test failures print container
// contents. Note the trailing ", " before "}" is part of the format.
template <class T>
inline std::ostream& operator<<(std::ostream& s, const std::vector<T>& v)
{
    s << "{ ";
    for(std::size_t i = 0; i < v.size(); ++i)
        s << v[i] << ", ";
    s << "}";
    return s;
}
template <class T, class U, class Operator> template <class T, class U, class Operator>
struct expression struct expression
{ {
......
...@@ -683,4 +683,14 @@ TEST_CASE(logsoftmax) ...@@ -683,4 +683,14 @@ TEST_CASE(logsoftmax)
EXPECT(p == prog); EXPECT(p == prog);
} }
// Parsing no_pad_test.onnx must yield just an identity over the input —
// NOTE(review): presumably the model contains a Pad node with all-zero
// padding that the parser elides; verify against the .onnx fixture.
TEST_CASE(no_pad_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 2}});
    p.add_instruction(migraphx::op::identity{}, l0);
    auto prog = migraphx::parse_onnx("no_pad_test.onnx");
    EXPECT(p == prog);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); } int main(int argc, const char* argv[]) { test::run(argc, argv); }
This diff is collapsed.
#ifndef MIGRAPHX_GUARD_SCHEDULE_MODEL_HPP
#define MIGRAPHX_GUARD_SCHEDULE_MODEL_HPP
#include <cassert>
#include <string>
#include <functional>
#include <memory>
#include <type_traits>
#include <utility>
#include <migraphx/config.hpp>
#include <migraphx/instruction_ref.hpp>
#include <vector>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
struct program;
struct operation;
#ifdef DOXYGEN
/// An interface for target-dependent model for the scheduler
struct schedule_model
{
    /// Get the number of concurrent instructions allowed
    std::size_t concurrency() const;
    /// Schedule a concurrent instruction
    void sched(program& p, instruction_ref ins, std::size_t n) const;
    /// Insert necessary waits before an instruction
    void wait(program& p, instruction_ref ins, std::size_t wait_id) const;
    /// Insert necessary records after an instruction
    void record(program& p, instruction_ref ins, std::size_t wait_id) const;
    /// Compute weights for an operation
    std::size_t weight(const operation& op) const;
};
#else
<%
interface('schedule_model',
virtual('concurrency', returns='std::size_t', const=True),
virtual('sched', p='program&', ins='instruction_ref', n='std::size_t', const=True),
virtual('wait', p='program&', ins='instruction_ref', wait_id='std::size_t', const=True),
virtual('record', p='program&', ins='instruction_ref', wait_id='std::size_t', const=True),
virtual('weight', returns='std::size_t', op='const operation&', const=True)
)
%>
#endif
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
import string, sys, re, os import string, sys, re, os
trivial = [
'std::size_t',
'instruction_ref'
]
headers = ''' headers = '''
#include <algorithm> #include <algorithm>
#include <cassert> #include <cassert>
...@@ -286,7 +292,7 @@ def convert_member(d, struct_name): ...@@ -286,7 +292,7 @@ def convert_member(d, struct_name):
member['this'] = x member['this'] = x
if 'const' in t: if 'const' in t:
member['member_const'] = 'const' member['member_const'] = 'const'
if t.endswith(('&', '*')): if t.endswith(('&', '*')) or t in trivial:
if use_member: member_args.append(x) if use_member: member_args.append(x)
args.append(arg_name) args.append(arg_name)
else: else:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment