Commit 4a39a0f7 authored by Shucai Xiao
Browse files

Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into add-conv_bn_add-test

parents 5564172e bb827865
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
// Verifies the "topk" operator selecting the k=3 largest elements along the
// negative axis -2 of a {3, 5} float tensor, and returns both tuple outputs
// (the selected values and their indices).
struct test_topk_1 : verify_program<test_topk_1>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape input_shape{migraphx::shape::float_type, {3, 5}};
        auto input = mm->add_parameter("data", input_shape);
        // topk produces a tuple: element 0 holds values, element 1 holds indices.
        auto topk_ins = mm->add_instruction(
            migraphx::make_op("topk", {{"axis", -2}, {"k", 3}, {"largest", 1}}), input);
        auto values =
            mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), topk_ins);
        auto indices =
            mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), topk_ins);
        mm->add_return({values, indices});
        return p;
    }
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
// Verifies the "topk" operator selecting the k=4 smallest elements
// (largest=0) along axis 1 of a {3, 5} float tensor; only the values tuple
// element (index 0) is returned.
struct test_topk_2 : verify_program<test_topk_2>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape input_shape{migraphx::shape::float_type, {3, 5}};
        auto input = mm->add_parameter("data", input_shape);
        // largest=0 flips the selection to the smallest-k elements.
        auto topk_ins = mm->add_instruction(
            migraphx::make_op("topk", {{"axis", 1}, {"k", 4}, {"largest", 0}}), input);
        auto values =
            mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), topk_ins);
        mm->add_return({values});
        return p;
    }
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
// Verifies the "topk" operator selecting the k=3 smallest elements
// (largest=0) along negative axis -2 of a {3, 5} float tensor, returning
// both the values and the indices tuple elements.
struct test_topk_3 : verify_program<test_topk_3>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape input_shape{migraphx::shape::float_type, {3, 5}};
        auto input = mm->add_parameter("data", input_shape);
        // Same axis as test_topk_1 but with largest=0 (smallest-k selection).
        auto topk_ins = mm->add_instruction(
            migraphx::make_op("topk", {{"axis", -2}, {"k", 3}, {"largest", 0}}), input);
        auto values =
            mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), topk_ins);
        auto indices =
            mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), topk_ins);
        mm->add_return({values, indices});
        return p;
    }
};
...@@ -11,7 +11,8 @@ struct test_trans_abs : verify_program<test_trans_abs> ...@@ -11,7 +11,8 @@ struct test_trans_abs : verify_program<test_trans_abs>
migraphx::program p; migraphx::program p;
auto* mm = p.get_main_module(); auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}}); auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
auto tx = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), x); auto tx =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), x);
auto absx = mm->add_instruction(migraphx::make_op("abs"), tx); auto absx = mm->add_instruction(migraphx::make_op("abs"), tx);
auto r = mm->add_instruction(migraphx::make_op("add"), absx, absx); auto r = mm->add_instruction(migraphx::make_op("add"), absx, absx);
mm->add_instruction(migraphx::make_op("contiguous"), r); mm->add_instruction(migraphx::make_op("contiguous"), r);
......
...@@ -11,7 +11,8 @@ struct test_trans_ret : verify_program<test_trans_ret> ...@@ -11,7 +11,8 @@ struct test_trans_ret : verify_program<test_trans_ret>
migraphx::program p; migraphx::program p;
auto* mm = p.get_main_module(); auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}}); auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
auto tx = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), x); auto tx =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), x);
mm->add_return({tx}); mm->add_return({tx});
return p; return p;
......
...@@ -11,7 +11,8 @@ struct test_trans_tanh : verify_program<test_trans_tanh> ...@@ -11,7 +11,8 @@ struct test_trans_tanh : verify_program<test_trans_tanh>
migraphx::program p; migraphx::program p;
auto* mm = p.get_main_module(); auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}}); auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
auto tx = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), x); auto tx =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), x);
auto tanhx = mm->add_instruction(migraphx::make_op("tanh"), tx); auto tanhx = mm->add_instruction(migraphx::make_op("tanh"), tx);
auto r = mm->add_instruction(migraphx::make_op("add"), tanhx, tanhx); auto r = mm->add_instruction(migraphx::make_op("add"), tanhx, tanhx);
mm->add_instruction(migraphx::make_op("contiguous"), r); mm->add_instruction(migraphx::make_op("contiguous"), r);
......
...@@ -11,7 +11,8 @@ struct test_trans_tanh1 : verify_program<test_trans_tanh1> ...@@ -11,7 +11,8 @@ struct test_trans_tanh1 : verify_program<test_trans_tanh1>
migraphx::program p; migraphx::program p;
auto* mm = p.get_main_module(); auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}}); auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
auto tx = mm->add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), x); auto tx =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), x);
auto tanhx = mm->add_instruction(migraphx::make_op("tanh"), tx); auto tanhx = mm->add_instruction(migraphx::make_op("tanh"), tx);
auto r = mm->add_instruction(migraphx::make_op("add"), tanhx, tanhx); auto r = mm->add_instruction(migraphx::make_op("add"), tanhx, tanhx);
mm->add_return({tx, r}); mm->add_return({tx, r});
......
...@@ -13,7 +13,7 @@ struct test_transpose : verify_program<test_transpose> ...@@ -13,7 +13,7 @@ struct test_transpose : verify_program<test_transpose>
migraphx::shape s{migraphx::shape::float_type, {4, 3, 4, 4}}; migraphx::shape s{migraphx::shape::float_type, {4, 3, 4, 4}};
auto x = mm->add_parameter("x", s); auto x = mm->add_parameter("x", s);
std::vector<int64_t> perm = {0, 2, 3, 1}; std::vector<int64_t> perm = {0, 2, 3, 1};
auto l = mm->add_instruction(migraphx::make_op("transpose", {{"dims", perm}}), x); auto l = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), x);
mm->add_instruction(migraphx::make_op("contiguous"), l); mm->add_instruction(migraphx::make_op("contiguous"), l);
return p; return p;
} }
......
...@@ -16,7 +16,7 @@ struct test_triadd2 : verify_program<test_triadd2> ...@@ -16,7 +16,7 @@ struct test_triadd2 : verify_program<test_triadd2>
auto y = mm->add_parameter("y", s); auto y = mm->add_parameter("y", s);
auto z = mm->add_parameter("z", b); auto z = mm->add_parameter("z", b);
auto zb = mm->add_instruction( auto zb = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"dims", s.lens()}}), z); migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", s.lens()}}), z);
auto sum = mm->add_instruction(migraphx::make_op("add"), x, y); auto sum = mm->add_instruction(migraphx::make_op("add"), x, y);
mm->add_instruction(migraphx::make_op("add"), sum, zb); mm->add_instruction(migraphx::make_op("add"), sum, zb);
return p; return p;
......
...@@ -17,7 +17,7 @@ struct test_triadd_broadcast : verify_program<test_triadd_broadcast> ...@@ -17,7 +17,7 @@ struct test_triadd_broadcast : verify_program<test_triadd_broadcast>
auto y = mm->add_parameter("y", {migraphx::shape::float_type, {2, 2}}); auto y = mm->add_parameter("y", {migraphx::shape::float_type, {2, 2}});
auto z = mm->add_parameter("z", {migraphx::shape::float_type, {2, 2, 3}}); auto z = mm->add_parameter("z", {migraphx::shape::float_type, {2, 2, 3}});
auto by = mm->add_instruction( auto by = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 0}, {"dims", x->get_shape().lens()}}), y); migraphx::make_op("broadcast", {{"axis", 0}, {"out_lens", x->get_shape().lens()}}), y);
auto sum = mm->add_instruction(migraphx::make_op("add"), x, by); auto sum = mm->add_instruction(migraphx::make_op("add"), x, by);
mm->add_instruction(migraphx::make_op("add"), sum, z); mm->add_instruction(migraphx::make_op("add"), sum, z);
return p; return p;
......
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
// Verifies the ternary "where" operator with a boolean predicate tensor and
// two float operand tensors that all share the same {1, 3, 4, 5} shape.
struct test_where : verify_program<test_where>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape pred_shape{migraphx::shape::bool_type, {1, 3, 4, 5}};
        migraphx::shape operand_shape{migraphx::shape::float_type, {1, 3, 4, 5}};
        auto pred    = mm->add_parameter("b", pred_shape);
        auto on_true = mm->add_parameter("x", operand_shape);
        auto on_false = mm->add_parameter("y", operand_shape);
        auto result = mm->add_instruction(migraphx::make_op("where"), pred, on_true, on_false);
        mm->add_return({result});
        return p;
    }
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
// Verifies the "where" operator when both operands start as scalar-like {1}
// tensors and are explicitly multibroadcast up to the {1, 3, 4, 5} predicate
// shape before the select.
struct test_where2 : verify_program<test_where2>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape pred_shape{migraphx::shape::bool_type, {1, 3, 4, 5}};
        migraphx::shape scalar_shape{migraphx::shape::float_type, {1}};
        auto pred     = mm->add_parameter("b", pred_shape);
        auto on_true  = mm->add_parameter("x", scalar_shape);
        auto on_false = mm->add_parameter("y", scalar_shape);
        // Broadcast both single-element operands to match the predicate shape.
        auto bcast_true = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {1, 3, 4, 5}}}), on_true);
        auto bcast_false = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {1, 3, 4, 5}}}), on_false);
        auto result =
            mm->add_instruction(migraphx::make_op("where"), pred, bcast_true, bcast_false);
        mm->add_return({result});
        return p;
    }
};
import string, sys, re, os, runpy import string, sys, re, runpy
from functools import wraps from functools import wraps
type_map = {} type_map = {}
......
...@@ -73,26 +73,22 @@ migraphx_shape_datatype_t to_shape_type(shape::type_t t) ...@@ -73,26 +73,22 @@ migraphx_shape_datatype_t to_shape_type(shape::type_t t)
target get_target(const std::string& name) { return make_target(name); } target get_target(const std::string& name) { return make_target(name); }
migraphx::compile_options to_compile_options(const migraphx_compile_options& options) void set_offload_copy(compile_options& options, bool value) { options.offload_copy = value; }
{
migraphx::compile_options result{};
result.offload_copy = options.offload_copy;
result.fast_math = options.fast_math;
return result;
}
migraphx::file_options to_file_options(const migraphx_file_options& options) void set_fast_math(compile_options& options, bool value) { options.fast_math = value; }
{
migraphx::file_options result{}; void set_file_format(file_options& options, const char* format) { options.format = format; }
result.format = options.format;
return result;
}
void set_default_dim_value(onnx_options& options, size_t value) void set_default_dim_value(onnx_options& options, size_t value)
{ {
options.default_dim_value = value; options.default_dim_value = value;
} }
void set_default_loop_iterations(onnx_options& options, int64_t value)
{
options.max_loop_iterations = value;
}
void set_nhwc(tf_options& options, bool is_nhwc) { options.is_nhwc = is_nhwc; } void set_nhwc(tf_options& options, bool is_nhwc) { options.is_nhwc = is_nhwc; }
void set_default_dim_value(tf_options& options, size_t value) { options.batch_size = value; } void set_default_dim_value(tf_options& options, size_t value) { options.batch_size = value; }
......
...@@ -41,26 +41,6 @@ typedef enum { ...@@ -41,26 +41,6 @@ typedef enum {
} migraphx_shape_datatype_t; } migraphx_shape_datatype_t;
#undef MIGRAPHX_SHAPE_GENERATE_ENUM_TYPES #undef MIGRAPHX_SHAPE_GENERATE_ENUM_TYPES
/// Options to be passed when compiling
typedef struct
{
/// For targets with offloaded memory(such as the gpu), this will insert
/// instructions during compilation to copy the input parameters to the
/// offloaded memory and to copy the final result from the offloaded
/// memory back to main memory.
bool offload_copy;
/// Optimize math functions to use faster approximate versions. There may
/// be slight accuracy degredation when enabled.
bool fast_math;
} migraphx_compile_options;
/// Options for saving and loading files
typedef struct
{
/// Format to be used for file. It can either be json or msgpack
const char* format;
} migraphx_file_options;
<% generate_c_header() %> <% generate_c_header() %>
#ifdef __cplusplus #ifdef __cplusplus
......
...@@ -3,5 +3,5 @@ pip3 install -r requirements.txt ...@@ -3,5 +3,5 @@ pip3 install -r requirements.txt
# Add newer cmake to the path # Add newer cmake to the path
export PATH="/opt/cmake/bin:$PATH" export PATH="/opt/cmake/bin:$PATH"
export CXXFLAGS="-D__HIP_PLATFORM_HCC__=1 -w" export CXXFLAGS="-D__HIP_PLATFORM_HCC__=1 -w"
./build.sh --config Release --update --build --parallel --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) --use_migraphx ./build.sh --config Release --update --build --parallel --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) --test --use_migraphx
# pip3 install /code/onnxruntime/build/Linux/Release/dist/*.whl # pip3 install /code/onnxruntime/build/Linux/Release/dist/*.whl
...@@ -26,6 +26,8 @@ struct allocation_model ...@@ -26,6 +26,8 @@ struct allocation_model
std::string copy() const; std::string copy() const;
/// Create an allocation operator for the given shape /// Create an allocation operator for the given shape
operation allocate(const shape& s) const; operation allocate(const shape& s) const;
/// Create a preallocated operator for the given shape
operation preallocate(const shape& s, const std::string& id) const;
}; };
#else #else
...@@ -34,7 +36,8 @@ struct allocation_model ...@@ -34,7 +36,8 @@ struct allocation_model
interface('allocation_model', interface('allocation_model',
virtual('name', returns='std::string', const=True), virtual('name', returns='std::string', const=True),
virtual('copy', returns='std::string', const=True), virtual('copy', returns='std::string', const=True),
virtual('allocate', s='const shape&', returns='operation', const=True) virtual('allocate', s='const shape&', returns='operation', const=True),
virtual('preallocate', s='const shape&', id='std::string', returns='operation', const=True)
) )
%> %>
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <migraphx/module_ref.hpp> #include <migraphx/module_ref.hpp>
#include <migraphx/serialize.hpp> #include <migraphx/serialize.hpp>
#include <migraphx/auto_any_cast.hpp> #include <migraphx/auto_any_cast.hpp>
#include <migraphx/lifetime.hpp>
#include <migraphx/config.hpp> #include <migraphx/config.hpp>
namespace migraphx { namespace migraphx {
...@@ -178,7 +179,7 @@ shape normalize_compute_shape_op(const T& x, ...@@ -178,7 +179,7 @@ shape normalize_compute_shape_op(const T& x,
} }
template <class T> template <class T>
auto compute_op(rank<2>, auto compute_op(rank<1>,
const T& x, const T& x,
context& ctx, context& ctx,
const shape& output_shape, const shape& output_shape,
...@@ -188,14 +189,6 @@ auto compute_op(rank<2>, ...@@ -188,14 +189,6 @@ auto compute_op(rank<2>,
return x.compute(auto_any_cast(ctx), output_shape, input); return x.compute(auto_any_cast(ctx), output_shape, input);
} }
template <class T>
auto compute_op(
rank<1>, const T& x, context&, const shape& output_shape, const std::vector<argument>& input)
-> decltype(x.compute(output_shape, input))
{
return x.compute(output_shape, input);
}
template <class T> template <class T>
argument compute_op(rank<0>, const T& x, context&, const shape&, const std::vector<argument>&) argument compute_op(rank<0>, const T& x, context&, const shape&, const std::vector<argument>&)
{ {
...@@ -207,50 +200,118 @@ template <class T> ...@@ -207,50 +200,118 @@ template <class T>
argument argument
compute_op(const T& x, context& ctx, const shape& output_shape, const std::vector<argument>& input) compute_op(const T& x, context& ctx, const shape& output_shape, const std::vector<argument>& input)
{ {
return compute_op(rank<2>{}, x, ctx, output_shape, input); return compute_op(rank<1>{}, x, ctx, output_shape, input);
} }
template <class T> template <class T>
auto compute_op(rank<2>, const T& x, const shape& output_shape, const std::vector<argument>& input) auto compute_op(rank<1>, const T& x, const shape& output_shape, const std::vector<argument>& input)
-> decltype(x.compute(output_shape, input)) -> decltype(x.compute(output_shape, input))
{ {
return x.compute(output_shape, input); return x.compute(output_shape, input);
} }
template <class T> template <class T>
auto compute_op(rank<1>, const T& x, const shape& output_shape, const std::vector<argument>& input) argument compute_op(rank<0>, const T& x, const shape&, const std::vector<argument>&)
-> decltype(x.compute(auto_any_cast(std::declval<context&>()), output_shape, input))
{ {
std::string name = x.name(); std::string name = x.name();
MIGRAPHX_THROW("Not computable without a context: " + name); MIGRAPHX_THROW("Not computable: " + name);
} }
template <class T> template <class T>
argument compute_op(rank<0>, const T& x, const shape&, const std::vector<argument>&) argument compute_op(const T& x, const shape& output_shape, const std::vector<argument>& input)
{
return compute_op(rank<1>{}, x, output_shape, input);
}
template <class T, class F>
auto compute_op(rank<1>,
const T& x,
const shape& output,
const std::vector<argument>& inputs,
const std::vector<module_ref>& module_args,
F f) -> decltype(x.compute(output, inputs, module_args, f))
{
return x.compute(output, inputs, module_args, f);
}
template <class T, class F>
argument compute_op(rank<0>,
const T& x,
const shape&,
const std::vector<argument>&,
const std::vector<module_ref>&,
F)
{ {
std::string name = x.name(); std::string name = x.name();
MIGRAPHX_THROW("Not computable: " + name); MIGRAPHX_THROW("Not computable: " + name);
} }
template <class T> template <class T, class F>
argument compute_op(const T& x, const shape& output_shape, const std::vector<argument>& input) argument compute_op(const T& x,
const shape& output,
const std::vector<argument>& inputs,
const std::vector<module_ref>& module_args,
F f)
{ {
return compute_op(rank<2>{}, x, output_shape, input); return compute_op(rank<1>{}, x, output, inputs, module_args, f);
} }
template <class T, class F> template <class T, class F>
auto compute_op(rank<1>, auto compute_op(rank<4>,
const T& x, const T& x,
context& ctx,
const shape& output,
const std::vector<argument>& inputs, const std::vector<argument>& inputs,
const std::vector<module_ref>& module_args, const std::vector<module_ref>& module_args,
F f) -> decltype(x.compute(inputs, module_args, f)) F f) -> decltype(x.compute(auto_any_cast(ctx), output, inputs, module_args, f))
{ {
return x.compute(inputs, module_args, f); return x.compute(auto_any_cast(ctx), output, inputs, module_args, f);
} }
template <class T, class F> template <class T, class F>
argument auto compute_op(rank<3>,
compute_op(rank<0>, const T& x, const std::vector<argument>&, const std::vector<module_ref>&, F) const T& x,
context&,
const shape& output,
const std::vector<argument>& inputs,
const std::vector<module_ref>& module_args,
F f) -> decltype(x.compute(output, inputs, module_args, f))
{
return x.compute(output, inputs, module_args, f);
}
template <class T, class F>
auto compute_op(rank<2>,
const T& x,
context&,
const shape& output,
const std::vector<argument>& inputs,
const std::vector<module_ref>&,
F) -> decltype(x.compute(output, inputs))
{
return x.compute(output, inputs);
}
template <class T, class F>
auto compute_op(rank<1>,
const T& x,
context& ctx,
const shape& output,
const std::vector<argument>& inputs,
const std::vector<module_ref>&,
F) -> decltype(x.compute(auto_any_cast(ctx), output, inputs))
{
return x.compute(auto_any_cast(ctx), output, inputs);
}
template <class T, class F>
argument compute_op(rank<0>,
const T& x,
context&,
const shape&,
const std::vector<argument>&,
const std::vector<module_ref>&,
F)
{ {
std::string name = x.name(); std::string name = x.name();
MIGRAPHX_THROW("Not computable: " + name); MIGRAPHX_THROW("Not computable: " + name);
...@@ -258,11 +319,13 @@ argument ...@@ -258,11 +319,13 @@ argument
template <class T, class F> template <class T, class F>
argument compute_op(const T& x, argument compute_op(const T& x,
context& ctx,
const shape& output,
const std::vector<argument>& inputs, const std::vector<argument>& inputs,
const std::vector<module_ref>& module_args, const std::vector<module_ref>& module_args,
F f) F f)
{ {
return compute_op(rank<1>{}, x, inputs, module_args, f); return compute_op(rank<4>{}, x, ctx, output, inputs, module_args, f);
} }
template <class T> template <class T>
...@@ -385,9 +448,9 @@ void from_value_op(T& x, const value& v) ...@@ -385,9 +448,9 @@ void from_value_op(T& x, const value& v)
} }
template <class T> template <class T>
bool is_borrowed_op(const T&) lifetime get_lifetime_op(const T&)
{ {
return false; return lifetime::local;
} }
} // namespace detail } // namespace detail
...@@ -403,7 +466,8 @@ bool is_borrowed_op(const T&) ...@@ -403,7 +466,8 @@ bool is_borrowed_op(const T&)
const = True, const = True,
default = 'detail::need_normalization_op'), default = 'detail::need_normalization_op'),
virtual('has_finalize', returns = 'bool', const = True, default = 'detail::has_finalize_op'), virtual('has_finalize', returns = 'bool', const = True, default = 'detail::has_finalize_op'),
virtual('is_borrowed', returns = 'bool', const = True, default = 'detail::is_borrowed_op'), virtual(
'get_lifetime', returns = 'lifetime', const = True, default = 'detail::get_lifetime_op'),
virtual('output_alias', virtual('output_alias',
returns = 'std::ptrdiff_t', returns = 'std::ptrdiff_t',
input = 'const std::vector<shape>&', input = 'const std::vector<shape>&',
...@@ -447,10 +511,22 @@ bool is_borrowed_op(const T&) ...@@ -447,10 +511,22 @@ bool is_borrowed_op(const T&)
virtual( virtual(
'compute', 'compute',
returns = 'argument', returns = 'argument',
output = 'const shape&',
input = 'const std::vector<argument>&',
module_args = 'const std::vector<module_ref>&',
run =
'std::function<std::vector<argument>(module_ref&, const std::unordered_map<std::string, argument>&)>',
const = True,
default = 'detail::compute_op'),
virtual(
'compute',
returns = 'argument',
ctx = 'context&',
output = 'const shape&',
input = 'const std::vector<argument>&', input = 'const std::vector<argument>&',
module_args = 'const std::vector<module_ref>&', module_args = 'const std::vector<module_ref>&',
run = run =
'std::function<std::vector<argument>(module_ref& mdl, const std::unordered_map<std::string, argument>& inputs)>', 'std::function<std::vector<argument>(module_ref&, const std::unordered_map<std::string, argument>&)>',
const = True, const = True,
default = 'detail::compute_op'), default = 'detail::compute_op'),
virtual('to_value', returns = 'value', const = True, default = 'detail::to_value_op'), virtual('to_value', returns = 'value', const = True, default = 'detail::to_value_op'),
......
...@@ -8,12 +8,14 @@ ...@@ -8,12 +8,14 @@
#include <utility> #include <utility>
#include <migraphx/functional.hpp> #include <migraphx/functional.hpp>
#include <migraphx/config.hpp> #include <migraphx/config.hpp>
#include <migraphx/rank.hpp>
namespace migraphx { namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
struct program; struct program;
struct module; struct module;
struct module_pass_manager;
#ifdef DOXYGEN #ifdef DOXYGEN
...@@ -24,6 +26,7 @@ struct pass ...@@ -24,6 +26,7 @@ struct pass
/// A unique name used to identify the pass /// A unique name used to identify the pass
std::string name() const; std::string name() const;
/// Run the pass on the module /// Run the pass on the module
void apply(module_pass_manager& mpm) const;
void apply(module& m) const; void apply(module& m) const;
/// Run the pass on the program /// Run the pass on the program
void apply(program& p) const; void apply(program& p) const;
...@@ -31,10 +34,34 @@ struct pass ...@@ -31,10 +34,34 @@ struct pass
#else #else
module& get_module(module_pass_manager& mpm);
namespace detail {
template <class T>
auto module_pass_manager_apply(rank<1>, const T& x, module_pass_manager& mpm)
-> decltype(x.apply(get_module(mpm)))
{
return x.apply(get_module(mpm));
}
template <class T>
void module_pass_manager_apply(rank<0>, const T&, module_pass_manager&)
{
}
template <class T>
void module_pass_manager_apply(const T& x, module_pass_manager& mpm)
{
module_pass_manager_apply(rank<1>{}, x, mpm);
}
} // namespace detail
<% <%
interface('pass', interface('pass',
virtual('name', returns='std::string', const=True), virtual('name', returns='std::string', const=True),
virtual('apply', returns='void', m='module &', const=True, default='migraphx::nop'), virtual('apply', returns='void', mpm='module_pass_manager &', const=True, default='migraphx::detail::module_pass_manager_apply'),
virtual('apply', returns='void', p='program &', const=True, default='migraphx::nop') virtual('apply', returns='void', p='program &', const=True, default='migraphx::nop')
) )
%> %>
......
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
# #
# Build MIGraphX prerequisites for docker container # Build MIGraphX prerequisites for docker container
set -e
#install pip3, rocm-cmake, rocblas and miopen #install pip3, rocm-cmake, rocblas and miopen
apt update && apt install -y python3-pip rocm-cmake rocblas miopen-hip openmp-extras apt update && apt install -y python3-pip rocm-cmake rocblas miopen-hip openmp-extras
...@@ -12,23 +14,15 @@ PREFIX=/usr/local ...@@ -12,23 +14,15 @@ PREFIX=/usr/local
REQ_FILE_DIR="" REQ_FILE_DIR=""
if [ "$#" -ge 2 ]; then if [ "$#" -ge 2 ]; then
PREFIX=$1 PREFIX=$1
REQ_FILE_DIR=$2 cd $2
elif [ "$#" -eq 1 ]; then elif [ "$#" -eq 1 ]; then
PREFIX=$1 PREFIX=$1
fi fi
echo "Dependencies are install at $PREFIX" echo "Dependencies are install at $PREFIX"
# Manually ignore rocm dependencies # Install deps with rbuild
cget -p $PREFIX ignore \ rbuild prepare -d $PREFIX -s develop
RadeonOpenCompute/clang-ocl \
ROCm-Developer-Tools/HIP \
ROCmSoftwarePlatform/MIOpen \
ROCmSoftwarePlatform/MIOpenGEMM \
ROCmSoftwarePlatform/rocBLAS
cget -p $PREFIX init --cxx /opt/rocm/llvm/bin/clang++ --cc /opt/rocm/llvm/bin/clang
cget -p $PREFIX install -f ${REQ_FILE_DIR}dev-requirements.txt
cget -p $PREFIX install oneapi-src/oneDNN@v1.7
# install onnx package for unit tests # install onnx package for unit tests
pip3 install onnx==1.8.1 numpy==1.18.5 typing==3.7.4 pytest==6.0.1 packaging==16.8 pip3 install onnx==1.8.1 numpy==1.18.5 typing==3.7.4 pytest==6.0.1 packaging==16.8
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment