Commit 30c49503 authored by Khalique Ahmed's avatar Khalique Ahmed
Browse files

manual merge

parents 870a396b 09aaa63e
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#include <migraphx/literal.hpp> #include <migraphx/literal.hpp>
#include <migraphx/instruction.hpp> #include <migraphx/instruction.hpp>
#include <migraphx/quantization.hpp> #include <migraphx/quantization.hpp>
#include <migraphx/ref/target.hpp> #include <migraphx/register_target.hpp>
#include <migraphx/shape.hpp> #include <migraphx/shape.hpp>
#include <migraphx/verify.hpp> #include <migraphx/verify.hpp>
#include <migraphx/make_op.hpp> #include <migraphx/make_op.hpp>
...@@ -207,7 +207,7 @@ static auto run_prog(migraphx::program p, int64_t iter_num, bool cond, int64_t i ...@@ -207,7 +207,7 @@ static auto run_prog(migraphx::program p, int64_t iter_num, bool cond, int64_t i
migraphx::shape s{migraphx::shape::int64_type, {1}}; migraphx::shape s{migraphx::shape::int64_type, {1}};
migraphx::shape sc{migraphx::shape::bool_type}; migraphx::shape sc{migraphx::shape::bool_type};
p.compile(migraphx::ref::target{}); p.compile(migraphx::make_target("ref"));
migraphx::parameter_map pp; migraphx::parameter_map pp;
pp["iter_num"] = migraphx::argument(si, &iter_num); pp["iter_num"] = migraphx::argument(si, &iter_num);
pp["ccond"] = migraphx::argument(sc, &cond); pp["ccond"] = migraphx::argument(sc, &cond);
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
* THE SOFTWARE. * THE SOFTWARE.
*/ */
#include <migraphx/program.hpp> #include <migraphx/program.hpp>
#include <migraphx/ref/target.hpp> #include <migraphx/register_target.hpp>
#include <migraphx/load_save.hpp> #include <migraphx/load_save.hpp>
#include "test.hpp" #include "test.hpp"
#include <migraphx/make_op.hpp> #include <migraphx/make_op.hpp>
...@@ -82,7 +82,7 @@ TEST_CASE(as_file) ...@@ -82,7 +82,7 @@ TEST_CASE(as_file)
TEST_CASE(compiled) TEST_CASE(compiled)
{ {
migraphx::program p1 = create_program(); migraphx::program p1 = create_program();
p1.compile(migraphx::ref::target{}); p1.compile(migraphx::make_target("ref"));
std::vector<char> buffer = migraphx::save_buffer(p1); std::vector<char> buffer = migraphx::save_buffer(p1);
migraphx::program p2 = migraphx::load_buffer(buffer); migraphx::program p2 = migraphx::load_buffer(buffer);
EXPECT(p1.sort() == p2.sort()); EXPECT(p1.sort() == p2.sort());
......
...@@ -60,7 +60,9 @@ struct reflectable_type ...@@ -60,7 +60,9 @@ struct reflectable_type
return migraphx::pack(f(self.value, "value")); return migraphx::pack(f(self.value, "value"));
} }
}; };
std::vector<nested_type> nested_types = {}; std::vector<nested_type> nested_types = {};
std::tuple<int, nested_type, std::string> tuple_items = std::make_tuple(0, nested_type{0}, "");
migraphx::optional<int> opt_value = migraphx::nullopt;
template <class Self, class F> template <class Self, class F>
static auto reflect(Self& self, F f) static auto reflect(Self& self, F f)
...@@ -71,7 +73,8 @@ struct reflectable_type ...@@ -71,7 +73,8 @@ struct reflectable_type
f(self.et, "et"), f(self.et, "et"),
f(self.se, "se"), f(self.se, "se"),
f(self.ce, "ce"), f(self.ce, "ce"),
f(self.nested_types, "nested_types")); f(self.nested_types, "nested_types"),
f(self.tuple_items, "tuple_items"));
} }
}; };
...@@ -83,7 +86,9 @@ TEST_CASE(serialize_reflectable_type) ...@@ -83,7 +86,9 @@ TEST_CASE(serialize_reflectable_type)
{}, {},
reflectable_type::simple1, reflectable_type::simple1,
reflectable_type::class_enum::class2, reflectable_type::class_enum::class2,
{{1}, {2}}}; {{1}, {2}},
{5, {4}, "hello"},
{migraphx::nullopt}};
migraphx::value v1 = migraphx::to_value(t1); migraphx::value v1 = migraphx::to_value(t1);
reflectable_type t2 = migraphx::from_value<reflectable_type>(v1); reflectable_type t2 = migraphx::from_value<reflectable_type>(v1);
migraphx::value v2 = migraphx::to_value(t2); migraphx::value v2 = migraphx::to_value(t2);
...@@ -125,6 +130,21 @@ TEST_CASE(serialize_empty_struct) ...@@ -125,6 +130,21 @@ TEST_CASE(serialize_empty_struct)
EXPECT(v.at("a").to<int>() == 1); EXPECT(v.at("a").to<int>() == 1);
} }
// A disengaged optional must serialize to a null value.
TEST_CASE(serialize_empty_optional)
{
    const migraphx::optional<int> empty_opt{};
    const migraphx::value serialized = migraphx::to_value(empty_opt);
    EXPECT(serialized.is_null());
}
// An engaged optional serializes as its contained value, not as a wrapper.
TEST_CASE(serialize_optional)
{
    const migraphx::optional<int> engaged{2};
    const migraphx::value serialized = migraphx::to_value(engaged);
    EXPECT(serialized.is_int64());
    EXPECT(serialized.to<int>() == 2);
}
TEST_CASE(from_value_binary) TEST_CASE(from_value_binary)
{ {
std::vector<std::uint8_t> data(10); std::vector<std::uint8_t> data(10);
......
...@@ -238,6 +238,30 @@ TEST_CASE(test_shape_dynamic_serialize) ...@@ -238,6 +238,30 @@ TEST_CASE(test_shape_dynamic_serialize)
EXPECT(s3 != s4); EXPECT(s3 != s4);
} }
TEST_CASE(any_of_dynamic_true)
{
    // A tuple shape mixing one dynamic and one static sub-shape reports dynamic.
    std::vector<migraphx::shape> subs{
        migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}},
        migraphx::shape{migraphx::shape::float_type, {3, 4, 5}}};
    migraphx::shape tuple0{subs};
    EXPECT(tuple0.any_of_dynamic());
    // Dynamic dimensions with equal min/max ({1,1},{4,4}) still count as dynamic.
    subs = {migraphx::shape{migraphx::shape::float_type, {{1, 1}, {4, 4}}},
            migraphx::shape{migraphx::shape::float_type, {3, 4, 5}}};
    migraphx::shape tuple1{subs};
    EXPECT(tuple1.any_of_dynamic());
}
TEST_CASE(any_of_dynamic_false)
{
    // When every sub-shape is static, the tuple shape has no dynamic members.
    std::vector<migraphx::shape> subs{
        migraphx::shape{migraphx::shape::float_type, {1, 4}},
        migraphx::shape{migraphx::shape::float_type, {3, 4, 5}}};
    migraphx::shape tuple_shape{subs};
    EXPECT(not tuple_shape.any_of_dynamic());
}
TEST_CASE(test_shape_packed) TEST_CASE(test_shape_packed)
{ {
migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}}; migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}};
......
...@@ -559,6 +559,32 @@ TEST_CASE(simplify_inner_broadcast2) ...@@ -559,6 +559,32 @@ TEST_CASE(simplify_inner_broadcast2)
EXPECT(m1 == m2); EXPECT(m1 == m2);
} }
TEST_CASE(simplify_inner_broadcast_scalar)
{
    // The full {32, 384} broadcast of both operands should be deferred until
    // after the add, leaving only the scalar operand widened before it.
    auto full_bcast = migraphx::op::multibroadcast{{32, 384}};
    migraphx::module before;
    {
        auto px    = before.add_parameter("x", {migraphx::shape::int32_type, {1, 384}});
        auto py    = before.add_parameter("y", {migraphx::shape::int32_type, {1, 1}});
        auto bx    = before.add_instruction(full_bcast, px);
        auto by    = before.add_instruction(full_bcast, py);
        auto total = before.add_instruction(migraphx::make_op("add"), bx, by);
        before.add_instruction(pass_op{}, total);
    }
    run_pass(before);
    migraphx::module expected;
    {
        auto px = expected.add_parameter("x", {migraphx::shape::int32_type, {1, 384}});
        auto py = expected.add_parameter("y", {migraphx::shape::int32_type, {1, 1}});
        // Only the scalar operand is widened to {1, 384} ahead of the add...
        auto by    = expected.add_instruction(migraphx::op::multibroadcast{{1, 384}}, py);
        auto total = expected.add_instruction(migraphx::make_op("add"), px, by);
        // ...and the full broadcast happens once, on the result.
        auto widened = expected.add_instruction(full_bcast, total);
        expected.add_instruction(pass_op{}, widened);
    }
    EXPECT(before == expected);
}
TEST_CASE(simplify_add_conv1) TEST_CASE(simplify_add_conv1)
{ {
migraphx::module m; migraphx::module m;
...@@ -1041,16 +1067,18 @@ TEST_CASE(simplify_neg_unit_mult_const) ...@@ -1041,16 +1067,18 @@ TEST_CASE(simplify_neg_unit_mult_const)
{ {
migraphx::module m1; migraphx::module m1;
{ {
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}}); auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1, 6}});
auto unit = m1.add_literal(-1); auto unit = m1.add_literal(
migraphx::literal{{migraphx::shape::int32_type, {1, 6}}, std::vector<int>(6, -1)});
m1.add_instruction(migraphx::make_op("mul"), x, unit); m1.add_instruction(migraphx::make_op("mul"), x, unit);
} }
run_pass(m1); run_pass(m1);
migraphx::module m2; migraphx::module m2;
{ {
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}}); auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1, 6}});
m2.add_instruction(migraphx::make_op("neg"), x); auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
} }
EXPECT((m1 == m2)); EXPECT((m1 == m2));
...@@ -1068,8 +1096,30 @@ TEST_CASE(simplify_neg_unit_mult_const2) ...@@ -1068,8 +1096,30 @@ TEST_CASE(simplify_neg_unit_mult_const2)
migraphx::module m2; migraphx::module m2;
{ {
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}}); auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x); auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
}
EXPECT((m1 == m2));
}
TEST_CASE(simplify_neg_unit_mult_const_add)
{
migraphx::module m1;
{
auto unit = m1.add_literal(-1);
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto x2 = m1.add_instruction(migraphx::make_op("mul"), unit, x);
m1.add_instruction(migraphx::make_op("add"), x2, x2);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("add"), x2, x2);
} }
EXPECT((m1 == m2)); EXPECT((m1 == m2));
...@@ -1091,8 +1141,9 @@ TEST_CASE(simplify_neg_unit_mul_const_vec) ...@@ -1091,8 +1141,9 @@ TEST_CASE(simplify_neg_unit_mul_const_vec)
migraphx::module m2; migraphx::module m2;
{ {
auto x = m2.add_parameter("x", x_shape); auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x); auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
} }
EXPECT(m1 == m2); EXPECT(m1 == m2);
...@@ -1114,8 +1165,9 @@ TEST_CASE(simplify_neg_unit_mul_const_vec2) ...@@ -1114,8 +1165,9 @@ TEST_CASE(simplify_neg_unit_mul_const_vec2)
migraphx::module m2; migraphx::module m2;
{ {
auto x = m2.add_parameter("x", x_shape); auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x); auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
} }
EXPECT(m1 == m2); EXPECT(m1 == m2);
...@@ -1133,8 +1185,9 @@ TEST_CASE(simplify_neg_unit_div_const) ...@@ -1133,8 +1185,9 @@ TEST_CASE(simplify_neg_unit_div_const)
migraphx::module m2; migraphx::module m2;
{ {
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}}); auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x); auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
} }
EXPECT(m1 == m2); EXPECT(m1 == m2);
...@@ -1156,8 +1209,9 @@ TEST_CASE(simplify_neg_unit_div_const_vec) ...@@ -1156,8 +1209,9 @@ TEST_CASE(simplify_neg_unit_div_const_vec)
migraphx::module m2; migraphx::module m2;
{ {
auto x = m2.add_parameter("x", x_shape); auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x); auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
} }
EXPECT(m1 == m2); EXPECT(m1 == m2);
...@@ -1216,8 +1270,9 @@ TEST_CASE(simplify_sub_neg_zero_const) ...@@ -1216,8 +1270,9 @@ TEST_CASE(simplify_sub_neg_zero_const)
migraphx::module m2; migraphx::module m2;
{ {
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}}); auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x); auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
} }
EXPECT(m1 == m2); EXPECT(m1 == m2);
} }
...@@ -1238,8 +1293,9 @@ TEST_CASE(simplify_sub_neg_zero_const_vec) ...@@ -1238,8 +1293,9 @@ TEST_CASE(simplify_sub_neg_zero_const_vec)
migraphx::module m2; migraphx::module m2;
{ {
auto x = m2.add_parameter("x", x_shape); auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x); auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
} }
EXPECT(m1 == m2); EXPECT(m1 == m2);
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
*/ */
#include <migraphx/simplify_qdq.hpp> #include <migraphx/simplify_qdq.hpp>
#include <migraphx/program.hpp> #include <migraphx/program.hpp>
#include <migraphx/ref/target.hpp> #include <migraphx/register_target.hpp>
#include <migraphx/instruction.hpp> #include <migraphx/instruction.hpp>
#include <test.hpp> #include <test.hpp>
#include <migraphx/make_op.hpp> #include <migraphx/make_op.hpp>
...@@ -686,8 +686,8 @@ TEST_CASE(conv_correctness) ...@@ -686,8 +686,8 @@ TEST_CASE(conv_correctness)
auto input = migraphx::argument(si, iv.data()); auto input = migraphx::argument(si, iv.data());
std::vector<float> wv(sw.elements(), 10); std::vector<float> wv(sw.elements(), 10);
auto weights = migraphx::argument(sw, wv.data()); auto weights = migraphx::argument(sw, wv.data());
p1.compile(migraphx::target(migraphx::ref::target{})); p1.compile(migraphx::target(migraphx::make_target("ref")));
p2.compile(migraphx::target(migraphx::ref::target{})); p2.compile(migraphx::target(migraphx::make_target("ref")));
auto result1 = p1.eval({{"input", input}, {"weights", weights}}).back(); auto result1 = p1.eval({{"input", input}, {"weights", weights}}).back();
std::vector<float> rv1(16); std::vector<float> rv1(16);
...@@ -736,8 +736,8 @@ TEST_CASE(dot_correctness) ...@@ -736,8 +736,8 @@ TEST_CASE(dot_correctness)
auto a = migraphx::argument(sh1, av.data()); auto a = migraphx::argument(sh1, av.data());
std::vector<float> bv(sh2.elements(), 10); std::vector<float> bv(sh2.elements(), 10);
auto b = migraphx::argument(sh2, bv.data()); auto b = migraphx::argument(sh2, bv.data());
p1.compile(migraphx::target(migraphx::ref::target{})); p1.compile(migraphx::target(migraphx::make_target("ref")));
p2.compile(migraphx::target(migraphx::ref::target{})); p2.compile(migraphx::target(migraphx::make_target("ref")));
auto result1 = p1.eval({{"a", a}, {"b", b}}).back(); auto result1 = p1.eval({{"a", a}, {"b", b}}).back();
std::vector<float> rv1(sh3.elements()); std::vector<float> rv1(sh3.elements());
......
...@@ -22,7 +22,6 @@ ...@@ -22,7 +22,6 @@
* THE SOFTWARE. * THE SOFTWARE.
*/ */
#include <migraphx/register_target.hpp> #include <migraphx/register_target.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/target.hpp> #include <migraphx/target.hpp>
#include "test.hpp" #include "test.hpp"
...@@ -43,7 +42,10 @@ TEST_CASE(make_invalid_target) ...@@ -43,7 +42,10 @@ TEST_CASE(make_invalid_target)
// NOTE(review): this span of the diff dump carried both diff columns fused on
// one line; this is the reconstructed post-merge (right-column) version.
// Target registration is lazy: the registry is empty until the first
// make_target call loads and registers a target.
TEST_CASE(targets)
{
    auto ts = migraphx::get_targets();
    EXPECT(ts.size() == 0);
    auto ref_t = migraphx::make_target("ref");
    ts = migraphx::get_targets();
    EXPECT(ts.size() == 1);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); } int main(int argc, const char* argv[]) { test::run(argc, argv); }
...@@ -67,7 +67,13 @@ int main(int argc, const char* argv[]) ...@@ -67,7 +67,13 @@ int main(int argc, const char* argv[])
{ {
run_verify rv; run_verify rv;
rv.add_validation_for("gpu", &validate_gpu); rv.add_validation_for("gpu", &validate_gpu);
rv.disable_test_for("cpu", {"test_if_lp", "test_if_param", "test_if_literal"}); rv.disable_test_for("cpu",
{"test_if_lp",
"test_if_param",
"test_if_literal",
"test_select_module_add",
"test_select_module_reduce",
"test_select_module_conv"});
rv.disable_test_for("gpu", {"test_conv_bn_add"}); rv.disable_test_for("gpu", {"test_conv_bn_add"});
rv.run(argc, argv); rv.run(argc, argv);
} }
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
#include "verify_program.hpp" #include "verify_program.hpp"
#include "test.hpp" #include "test.hpp"
#include <migraphx/env.hpp> #include <migraphx/env.hpp>
#include <migraphx/ref/target.hpp> #include <migraphx/register_target.hpp>
#include <migraphx/ranges.hpp> #include <migraphx/ranges.hpp>
#include <migraphx/generate.hpp> #include <migraphx/generate.hpp>
#include <migraphx/load_save.hpp> #include <migraphx/load_save.hpp>
...@@ -117,7 +117,7 @@ void run_verify::validate(const migraphx::target& t, ...@@ -117,7 +117,7 @@ void run_verify::validate(const migraphx::target& t,
std::vector<migraphx::argument> run_verify::run_ref(migraphx::program p, std::vector<migraphx::argument> run_verify::run_ref(migraphx::program p,
migraphx::parameter_map inputs) const migraphx::parameter_map inputs) const
{ {
migraphx::ref::target t{}; migraphx::target t = migraphx::make_target("ref");
auto_print pp{p, t.name()}; auto_print pp{p, t.name()};
compile_check(p, t); compile_check(p, t);
return p.eval(std::move(inputs)); return p.eval(std::move(inputs));
...@@ -185,7 +185,16 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con ...@@ -185,7 +185,16 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
migraphx::parameter_map m; migraphx::parameter_map m;
for(auto&& x : p.get_parameter_shapes()) for(auto&& x : p.get_parameter_shapes())
{ {
m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first)); if(x.second.dynamic())
{
// create static shape using maximum dimensions
migraphx::shape static_shape{x.second.type(), x.second.max_lens()};
m[x.first] = migraphx::generate_argument(static_shape, get_hash(x.first));
}
else
{
m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first));
}
} }
auto gold_f = detach_async([=] { return run_ref(p, m); }); auto gold_f = detach_async([=] { return run_ref(p, m); });
......
...@@ -24,31 +24,30 @@ ...@@ -24,31 +24,30 @@
#include "verify_program.hpp" #include "verify_program.hpp"
#include <migraphx/program.hpp> #include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/generate.hpp> #include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp> #include <migraphx/make_op.hpp>
#include <migraphx/op/reduce_mean.hpp>
migraphx::instruction_ref add_layernorm(migraphx::module& m, migraphx::instruction_ref add_layernorm(migraphx::module& m,
migraphx::instruction_ref x, migraphx::instruction_ref x,
std::vector<size_t> dims, std::vector<size_t> dims,
float eps = 1e-12f) float eps = 1e-12f)
{ {
auto scale = auto mgx_type = x->get_shape().type();
m.add_parameter("scale", migraphx::shape{migraphx::shape::float_type, {dims.back()}}); auto scale = m.add_parameter("scale", migraphx::shape{mgx_type, {dims.back()}});
auto bias = auto bias = m.add_parameter("bias", migraphx::shape{mgx_type, {dims.back()}});
m.add_parameter("bias", migraphx::shape{migraphx::shape::float_type, {dims.back()}});
auto epsilon = m.add_literal(eps); auto epsilon = m.add_literal(migraphx::literal{migraphx::shape{mgx_type}, {eps}});
auto exponent = m.add_literal(2.0f); auto exponent = m.add_literal(migraphx::literal{migraphx::shape{mgx_type}, {2.0f}});
auto mean = m.add_instruction(migraphx::op::reduce_mean({2}), x); auto mean = m.add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2}}}), x);
auto mean_mbcast = auto mean_mbcast =
m.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), mean); m.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), mean);
auto sub = m.add_instruction(migraphx::make_op("sub"), x, mean_mbcast); auto sub = m.add_instruction(migraphx::make_op("sub"), x, mean_mbcast);
auto exponent_mbcast = auto exponent_mbcast =
m.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), exponent); m.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), exponent);
auto pow = m.add_instruction(migraphx::make_op("pow"), sub, exponent_mbcast); auto pow = m.add_instruction(migraphx::make_op("pow"), sub, exponent_mbcast);
auto var = m.add_instruction(migraphx::op::reduce_mean({2}), pow); auto var = m.add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2}}}), pow);
auto epsilon_mbcast = m.add_instruction( auto epsilon_mbcast = m.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, dims.at(1), 1}}}), epsilon); migraphx::make_op("multibroadcast", {{"out_lens", {1, dims.at(1), 1}}}), epsilon);
auto add_epsilon = m.add_instruction(migraphx::make_op("add"), var, epsilon_mbcast); auto add_epsilon = m.add_instruction(migraphx::make_op("add"), var, epsilon_mbcast);
...@@ -90,6 +89,32 @@ struct test_layernorm2 : verify_program<test_layernorm2> ...@@ -90,6 +89,32 @@ struct test_layernorm2 : verify_program<test_layernorm2>
} }
}; };
// Layernorm with a very large innermost dimension (1 x 32 x 262144) in fp32.
struct test_layernorm_large : verify_program<test_layernorm_large>
{
    migraphx::program create_program() const
    {
        migraphx::program prog;
        auto* main_mod = prog.get_main_module();
        const std::vector<size_t> lens = {1, 32, 262144};
        auto input =
            main_mod->add_parameter("x", migraphx::shape{migraphx::shape::float_type, lens});
        add_layernorm(*main_mod, input, lens);
        return prog;
    }
};
// Layernorm built on half-precision input (1 x 24 x 64).
struct test_layernorm_fp16 : verify_program<test_layernorm_fp16>
{
    migraphx::program create_program() const
    {
        migraphx::program prog;
        auto* main_mod = prog.get_main_module();
        const std::vector<size_t> lens = {1, 24, 64};
        auto input =
            main_mod->add_parameter("x", migraphx::shape{migraphx::shape::half_type, lens});
        add_layernorm(*main_mod, input, lens);
        return prog;
    }
};
struct test_layernorm_eps : verify_program<test_layernorm_eps> struct test_layernorm_eps : verify_program<test_layernorm_eps>
{ {
migraphx::program create_program() const migraphx::program create_program() const
......
...@@ -76,3 +76,29 @@ struct test_reduce_mean_2 : verify_program<test_reduce_mean_2> ...@@ -76,3 +76,29 @@ struct test_reduce_mean_2 : verify_program<test_reduce_mean_2>
return p; return p;
}; };
}; };
// reduce_mean over axis 1 of a 2 x (256*256*16) input to exercise large reductions.
struct test_large_reduce_mean1 : verify_program<test_large_reduce_mean1>
{
    migraphx::program create_program() const
    {
        migraphx::program prog;
        auto* main_mod = prog.get_main_module();
        const migraphx::shape in_shape{migraphx::shape::float_type, {2, 256 * 256 * 16}};
        auto input = main_mod->add_parameter("x", in_shape);
        main_mod->add_instruction(migraphx::op::reduce_mean{{1}}, input);
        return prog;
    }
};
// reduce_mean over the large innermost axis of a 1 x 32 x 262144 input.
struct test_large_reduce_mean2 : verify_program<test_large_reduce_mean2>
{
    migraphx::program create_program() const
    {
        migraphx::program prog;
        auto* main_mod = prog.get_main_module();
        const migraphx::shape in_shape{migraphx::shape::float_type, {1, 32, 262144}};
        auto input = main_mod->add_parameter("x", in_shape);
        main_mod->add_instruction(migraphx::op::reduce_mean{{2}}, input);
        return prog;
    }
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
// Dynamic-batch program that selects one of four static-batch submodules,
// each performing two adds against a broadcast literal, and returns both
// tuple elements of the selected submodule's result.
struct test_select_module_add : verify_program<test_select_module_add>
{
    migraphx::program create_program() const
    {
        migraphx::program prog;
        auto* main_mod = prog.get_main_module();
        // Scalar literal shared (by reference) with every submodule.
        migraphx::shape scalar_shape{migraphx::shape{migraphx::shape::float_type, {1}}};
        auto six = main_mod->add_literal(migraphx::literal{scalar_shape, {6}});
        // Build one static-batch submodule per supported batch size.
        std::vector<migraphx::module_ref> batch_mods;
        for(std::size_t batch_size : {1, 2, 3, 4})
        {
            auto* submod = prog.create_module("batch_" + std::to_string(batch_size));
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
            auto sm_input  = submod->add_parameter("data", sm_shape);
            auto lit_bcast = submod->add_instruction(
                migraphx::make_op("multibroadcast"), six, sm_input);
            auto first_add =
                submod->add_instruction(migraphx::make_op("add"), sm_input, lit_bcast);
            auto second_add =
                submod->add_instruction(migraphx::make_op("add"), first_add, lit_bcast);
            submod->add_return({first_add, second_add});
            batch_mods.push_back(submod);
        }
        migraphx::shape dyn_shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
        auto dyn_input = main_mod->add_parameter("data", dyn_shape);
        // Both tuple outputs share the same dynamic output shape.
        std::vector<migraphx::shape> out_shapes = {
            migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}},
            migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}}};
        migraphx::shape out_attr = migraphx::shape{out_shapes};
        auto selected            = main_mod->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {dyn_input},
            batch_mods);
        auto out0 = main_mod->add_instruction(
            migraphx::make_op("get_tuple_elem", {{"index", 0}}), selected);
        auto out1 = main_mod->add_instruction(
            migraphx::make_op("get_tuple_elem", {{"index", 1}}), selected);
        main_mod->add_return({out0, out1});
        return prog;
    }
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
// Dynamic-batch convolution: one static-batch conv submodule per batch size,
// dispatched through select_module on a {1,4} dynamic batch dimension.
struct test_select_module_conv : verify_program<test_select_module_conv>
{
    migraphx::program create_program() const
    {
        migraphx::program prog;
        auto* main_mod = prog.get_main_module();
        // Build one static-batch submodule per supported batch size.
        std::vector<migraphx::module_ref> batch_mods;
        for(std::size_t batch_size : {1, 2, 3, 4})
        {
            auto* submod = prog.create_module("batch_" + std::to_string(batch_size));
            migraphx::shape in_shape{migraphx::shape::float_type, {batch_size, 3, 4, 4}};
            auto sm_input = submod->add_parameter("data", in_shape);
            migraphx::shape w_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
            std::vector<float> w_data(2 * 3 * 3 * 3, 2.0);
            auto weights = submod->add_literal(migraphx::literal{w_shape, w_data});
            auto conv    = submod->add_instruction(
                migraphx::make_op("convolution", {{"padding", {1, 1}}}), sm_input, weights);
            submod->add_return({conv});
            batch_mods.push_back(submod);
        }
        migraphx::shape dyn_shape{migraphx::shape::float_type, {{1, 4}, {3, 3}, {4, 4}, {4, 4}}};
        auto dyn_input = main_mod->add_parameter("data", dyn_shape);
        std::vector<migraphx::shape> out_shapes = {
            migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}, {4, 4}, {4, 4}}}};
        migraphx::shape out_attr = migraphx::shape{out_shapes};
        auto selected            = main_mod->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {dyn_input},
            batch_mods);
        auto result = main_mod->add_instruction(
            migraphx::make_op("get_tuple_elem", {{"index", 0}}), selected);
        main_mod->add_return({result});
        return prog;
    }
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
// Dynamic-batch reduce_sum + squeeze: one static-batch submodule per batch
// size, dispatched through select_module on a {1,4} dynamic batch dimension.
struct test_select_module_reduce : verify_program<test_select_module_reduce>
{
    migraphx::program create_program() const
    {
        migraphx::program prog;
        // Build one static-batch submodule per supported batch size.
        std::vector<migraphx::module_ref> batch_mods;
        for(std::size_t batch_size : {1, 2, 3, 4})
        {
            auto* submod = prog.create_module("batch_" + std::to_string(batch_size));
            migraphx::shape in_shape{migraphx::shape::float_type, {batch_size, 2, 2}};
            auto sm_input = submod->add_parameter("data", in_shape);
            auto summed   = submod->add_instruction(
                migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
            auto squeezed =
                submod->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), summed);
            submod->add_return({squeezed});
            batch_mods.push_back(submod);
        }
        auto* main_mod = prog.get_main_module();
        migraphx::shape dyn_shape{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
        auto dyn_input = main_mod->add_parameter("data", dyn_shape);
        std::vector<migraphx::shape> out_shapes = {
            migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}}};
        migraphx::shape out_attr = migraphx::shape{out_shapes};
        auto selected            = main_mod->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {dyn_input},
            batch_mods);
        auto result = main_mod->add_instruction(
            migraphx::make_op("get_tuple_elem", {{"index", 0}}), selected);
        main_mod->add_return({result});
        return prog;
    }
};
...@@ -25,6 +25,7 @@ import argparse ...@@ -25,6 +25,7 @@ import argparse
import numpy as np import numpy as np
import migraphx import migraphx
import onnxruntime as ort import onnxruntime as ort
import sys
def parse_args(): def parse_args():
...@@ -33,15 +34,13 @@ def parse_args(): ...@@ -33,15 +34,13 @@ def parse_args():
'MIGraphX accuracy checker. Use to verify onnx files to ensure MIGraphX\'s output \ 'MIGraphX accuracy checker. Use to verify onnx files to ensure MIGraphX\'s output \
is within tolerance of onnx runtime\'s expected output.' is within tolerance of onnx runtime\'s expected output.'
) )
req_args = parser.add_argument_group(title='required arguments') file_args = parser.add_argument_group(title='file type arguments')
req_args.add_argument('--onnx', file_args.add_argument('--onnx', type=str, help='path to onnx file')
type=str, file_args.add_argument('--tf', type=str, help='path to tf pb file')
required=True, parser.add_argument('--provider',
help='path to onnx file') type=str,
req_args.add_argument('--provider', default='CPUExecutionProvider',
type=str, help='execution provider for onnx runtime \
default='CPUExecutionProvider',
help='execution provider for onnx runtime \
(default = CPUExecutionProvider)') (default = CPUExecutionProvider)')
parser.add_argument('--batch', parser.add_argument('--batch',
type=int, type=int,
...@@ -50,6 +49,9 @@ def parse_args(): ...@@ -50,6 +49,9 @@ def parse_args():
parser.add_argument('--fill1', parser.add_argument('--fill1',
action='store_true', action='store_true',
help='fill all arguments with a value of 1') help='fill all arguments with a value of 1')
parser.add_argument('--fill0',
action='store_true',
help='fill all arguments with a value of 0')
parser.add_argument('--verbose', parser.add_argument('--verbose',
action='store_true', action='store_true',
help='show verbose information (for debugging)') help='show verbose information (for debugging)')
...@@ -57,6 +59,12 @@ def parse_args(): ...@@ -57,6 +59,12 @@ def parse_args():
type=float, type=float,
default=1e-3, default=1e-3,
help='accuracy tolerance (default = 1e-3)') help='accuracy tolerance (default = 1e-3)')
parser.add_argument('--input-dim',
type=str,
action='append',
help='specify input parameter dimension \
with the following format --input_dim input_name:dim0,dim1,dim2...'
)
args = parser.parse_args() args = parser.parse_args()
return args return args
...@@ -111,42 +119,127 @@ def get_np_datatype(in_type): ...@@ -111,42 +119,127 @@ def get_np_datatype(in_type):
def main(): def main():
args = parse_args() args = parse_args()
use_onnx = True
if args.onnx == None:
use_onnx = False
if not use_onnx and args.tf == None:
print('Error: please specify either an onnx or tf pb file')
sys.exit(-1)
model_name = args.onnx model_name = args.onnx
batch = args.batch batch = args.batch
model = migraphx.parse_onnx(model_name, default_dim_value=batch) custom_inputs = args.input_dim
input_dims = {}
if custom_inputs != None:
for input in custom_inputs:
input_dim = ''.join(input.split(':')[:-1])
dims = [int(dim) for dim in input.split(':')[-1].split(',')]
input_dims[input_dim] = dims
if use_onnx:
if not input_dims:
model = migraphx.parse_onnx(model_name, default_dim_value=batch)
else:
model = migraphx.parse_onnx(model_name,
default_dim_value=batch,
map_input_dims=input_dims)
else:
model_name = args.tf
if not input_dims:
model = migraphx.parse_tf(model_name, batch_size=batch)
else:
model = migraphx.parse_tf(model_name,
batch_size=batch,
map_input_dims=input_dims)
if args.verbose: if args.verbose:
print(model) print(model)
model.compile(migraphx.get_target('gpu'), offload_copy=False) model.compile(migraphx.get_target('gpu'))
params = {} params = {}
test_inputs = {} test_inputs = {}
for name, shape in model.get_parameter_shapes().items(): for name, shape in model.get_parameter_shapes().items():
if args.verbose: if args.verbose:
print('Parameter {} -> {}'.format(name, shape)) print(f'Parameter {name} -> {shape}')
in_shape = shape.lens() in_shape = shape.lens()
in_type = shape.type_string() in_type = shape.type_string()
if not args.fill1: if not args.fill1 and not args.fill0:
test_input = np.random.rand(*(in_shape)).astype( test_input = np.random.rand(*(in_shape)).astype(
get_np_datatype(in_type)) get_np_datatype(in_type))
else: elif not args.fill0:
test_input = np.ones(in_shape).astype(get_np_datatype(in_type)) test_input = np.ones(in_shape).astype(get_np_datatype(in_type))
else:
test_input = np.zeros(in_shape).astype(get_np_datatype(in_type))
test_inputs[name] = test_input test_inputs[name] = test_input
params[name] = migraphx.to_gpu(migraphx.argument(test_input)) params[name] = migraphx.argument(test_input)
pred_migx = np.array(model.run(params)[-1])
pred_migx = np.array(migraphx.from_gpu(model.run(params)[-1])) if use_onnx:
sess = ort.InferenceSession(model_name, providers=[args.provider])
sess = ort.InferenceSession(model_name, providers=[args.provider]) ort_params = {}
for input in sess.get_inputs():
ort_params[input.name] = test_inputs[input.name]
try:
pred_fw = sess.run(None, ort_params)[-1]
except Exception as e:
if any(input_dims):
print(
'Error: custom input dim may not be compatible with onnx runtime'
)
raise e
else:
import tensorflow as tf
def load_tf_graph(model_name):
with tf.io.gfile.GFile(model_name, 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
with tf.compat.v1.Graph().as_default() as graph:
tf.graph_util.import_graph_def(graph_def)
return graph
graph = load_tf_graph(model_name)
is_nhwc = False
graph_ops = []
for op in graph.get_operations():
graph_ops.append(op.name)
if 'Conv' in op.node_def.op:
if 'NHWC' in op.get_attr('data_format').decode('utf-8'):
is_nhwc = True
graph_ops_set = set(graph_ops)
tf_dict = {}
for name in test_inputs.keys():
# graph.get_operations() adds 'import/' to the op name
tf_name = f'import/{name}'
if tf_name not in graph_ops_set:
continue
x = graph.get_tensor_by_name(f'{tf_name}:0')
tf_input = test_inputs[name]
# transpose input for NHWC model
if tf_input.ndim == 4 and is_nhwc:
tf_dict[x] = np.transpose(tf_input, (0, 2, 3, 1))
else:
tf_dict[x] = tf_input
ort_params = {} # assume last node in graph is output
for input in sess.get_inputs(): # TODO: let user specify op name for output
ort_params[input.name] = test_inputs[input.name] y = graph.get_tensor_by_name(f'{graph_ops[-1]}:0')
pred_ort = sess.run(None, ort_params)[-1] with tf.compat.v1.Session(graph=graph) as sess:
y_out = sess.run(y, feed_dict=tf_dict)
pred_fw = y_out
is_correct = check_correctness(pred_ort, pred_migx, args.tolerance, is_correct = check_correctness(pred_fw, pred_migx, args.tolerance,
args.tolerance, args.verbose) args.tolerance, args.verbose)
verbose_string = ' Rerun with --verbose for detailed information.' \ verbose_string = ' Rerun with --verbose for detailed information.' \
if not args.verbose else '' if not args.verbose else ''
......
...@@ -32,7 +32,6 @@ ...@@ -32,7 +32,6 @@
#include <migraphx/register_target.hpp> #include <migraphx/register_target.hpp>
#include <migraphx/generate.hpp> #include <migraphx/generate.hpp>
#include <migraphx/quantization.hpp> #include <migraphx/quantization.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/load_save.hpp> #include <migraphx/load_save.hpp>
#include <migraphx/make_op.hpp> #include <migraphx/make_op.hpp>
#include <migraphx/register_op.hpp> #include <migraphx/register_op.hpp>
...@@ -134,6 +133,11 @@ void set_offload_copy(compile_options& options, bool value) { options.offload_co ...@@ -134,6 +133,11 @@ void set_offload_copy(compile_options& options, bool value) { options.offload_co
void set_fast_math(compile_options& options, bool value) { options.fast_math = value; } void set_fast_math(compile_options& options, bool value) { options.fast_math = value; }
void set_exhaustive_tune_flag(compile_options& options, bool value)
{
options.exhaustive_tune = value;
}
void set_file_format(file_options& options, const char* format) { options.format = format; } void set_file_format(file_options& options, const char* format) { options.format = format; }
void set_default_dim_value(onnx_options& options, size_t value) void set_default_dim_value(onnx_options& options, size_t value)
......
...@@ -22,9 +22,12 @@ ...@@ -22,9 +22,12 @@
# THE SOFTWARE. # THE SOFTWARE.
##################################################################################### #####################################################################################
cd /onnxruntime cd /onnxruntime
pip3 install -r requirements.txt pip3 install -r requirements-dev.txt
# Add newer cmake to the path # Add newer cmake to the path
export PATH="/opt/cmake/bin:$PATH" export PATH="/opt/cmake/bin:$PATH"
export CXXFLAGS="-D__HIP_PLATFORM_HCC__=1 -w" export CXXFLAGS="-D__HIP_PLATFORM_AMD__=1 -w"
./build.sh --config Release --update --build --parallel --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) --test --use_migraphx ./build.sh --config Release --cmake_extra_defines CMAKE_HIP_COMPILER=/opt/rocm/llvm/bin/clang++ --update --build --parallel --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) --skip_tests --rocm_home /opt/rocm --use_migraphx --migraphx_home /opt/rocm --rocm_version=`cat /opt/rocm/.info/version-dev`
# pip3 install /code/onnxruntime/build/Linux/Release/dist/*.whl
cd build/Linux/Release
#Add test launcher for onnxrt tests
../../../tools/ci_build/github/pai/migraphx_test_launcher.sh
...@@ -26,10 +26,12 @@ ...@@ -26,10 +26,12 @@
if [ -z "$ONNX_HOME" ] if [ -z "$ONNX_HOME" ]
then then
ONNX_HOME=$HOME # The onnx library uses ONNX_HOME, by default if it doesn't exist
# the path of " ~/.onnx " is used
ONNX_HOME=$HOME/.onnx
fi fi
model_dir=$ONNX_HOME/.onnx/models model_dir=$ONNX_HOME/models
tmp_dir=$ONNX_HOME/tmp/ tmp_dir=$ONNX_HOME/tmp/
mkdir -p $model_dir mkdir -p $model_dir
mkdir -p $tmp_dir mkdir -p $tmp_dir
...@@ -42,7 +44,6 @@ models="bvlc_alexnet \ ...@@ -42,7 +44,6 @@ models="bvlc_alexnet \
for name in $models for name in $models
do do
curl https://s3.amazonaws.com/download.onnx/models/opset_9/$name.tar.gz --output $tmp_dir/$name.tar.gz curl https://download.onnxruntime.ai/onnx/models/$name.tar.gz --output $tmp_dir/$name.tar.gz
tar -xzvf $tmp_dir/$name.tar.gz --directory $model_dir && rm $tmp_dir/$name.tar.gz tar -xzvf $tmp_dir/$name.tar.gz --directory $model_dir && rm $tmp_dir/$name.tar.gz
done done
...@@ -66,6 +66,7 @@ any_ptr get_queue_context(T&) ...@@ -66,6 +66,7 @@ any_ptr get_queue_context(T&)
{ {
return {}; return {};
} }
template <class T> template <class T>
void wait_for_context(T&, any_ptr) void wait_for_context(T&, any_ptr)
{ {
...@@ -87,6 +88,7 @@ void finish_on_context(T&, any_ptr){} ...@@ -87,6 +88,7 @@ void finish_on_context(T&, any_ptr){}
{ {
v = ctx.to_value(); v = ctx.to_value();
} }
inline void migraphx_from_value(const value& v, context& ctx) { ctx.from_value(v); } inline void migraphx_from_value(const value& v, context& ctx) { ctx.from_value(v); }
#endif #endif
......
...@@ -140,6 +140,8 @@ template <class T> ...@@ -140,6 +140,8 @@ template <class T>
auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs) auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
-> decltype(x.normalize_compute_shape(inputs)) -> decltype(x.normalize_compute_shape(inputs))
{ {
if(inputs.empty())
MIGRAPHX_THROW("At least one input is required for " + x.name());
dependent_type<operation, T> y = x; dependent_type<operation, T> y = x;
normalize_attributes(y, inputs[0].max_lens()); normalize_attributes(y, inputs[0].max_lens());
return any_cast<T>(y).normalize_compute_shape(inputs); return any_cast<T>(y).normalize_compute_shape(inputs);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment