"vscode:/vscode.git/clone" did not exist on "77198d2e76cde72853a639acb709b459a9a6315d"
Commit 11e155c2 authored by Paul's avatar Paul
Browse files

Merge

parents 8a9c5bce aa7ff911
......@@ -1173,12 +1173,6 @@ TEST_CASE(gru_forward_args)
0.3852, -0.1170, -0.2937, 0.2979, -0.1357, 0.4257, 0.3884, -0.2916, 0.1071, 0.0934,
0.3645, -0.4310, -0.3480, 0.0702, -0.1558};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
std::vector<float> bias_data{
0.0560, 0.0310, -0.1669, -0.0781, 0.1793, -0.1758, 0.3173, -0.1650, -0.3732, 0.2946,
-0.0912, 0.3118, 0.1391, 0.2755, 0.2695, -0.1059, -0.2357, 0.3629, -0.2534, -0.0494,
0.0556, 0.0881, -0.2592, -0.2213, 0.2310, -0.4044, 0.1801, 0.1438, 0.3108, -0.3607};
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
std::vector<float> input{-0.8432,
-0.9887,
......@@ -1199,9 +1193,6 @@ TEST_CASE(gru_forward_args)
-1.0536,
-0.2529};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
std::vector<float> ih_data{
-0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
float clip = 0.0f;
// 3 args
......@@ -1242,6 +1233,11 @@ TEST_CASE(gru_forward_args)
// 4 args (bias is used)
{
std::vector<float> bias_data{
0.0560, 0.0310, -0.1669, -0.0781, 0.1793, -0.1758, 0.3173, -0.1650, -0.3732, 0.2946,
-0.0912, 0.3118, 0.1391, 0.2755, 0.2695, -0.1059, -0.2357, 0.3629, -0.2534, -0.0494,
0.0556, 0.0881, -0.2592, -0.2213, 0.2310, -0.4044, 0.1801, 0.1438, 0.3108, -0.3607};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_literal(migraphx::literal{in_shape, input});
......@@ -1280,6 +1276,9 @@ TEST_CASE(gru_forward_args)
// 4 args (ih is used)
{
std::vector<float> ih_data{
-0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_literal(migraphx::literal{in_shape, input});
......@@ -2210,15 +2209,6 @@ TEST_CASE(gru_bidirectional_args)
0.4101, 0.2641, -0.4110, -0.1681, 0.3582, -0.2089, 0.0852, 0.0963, 0.3866, 0.1955,
-0.2174, 0.1996, -0.2252, 0.1748, 0.1833, -0.3155, 0.2567, -0.4387, 0.3402, 0.0599};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
std::vector<float> bias_data{
-0.1582, -0.0826, 0.4008, 0.0118, 0.2511, 0.1900, -0.2838, 0.2549, -0.2484, 0.2363,
-0.4083, -0.0295, -0.1161, 0.1211, 0.2509, -0.1414, -0.2628, -0.2992, 0.1517, 0.1817,
-0.2783, 0.3183, -0.1629, -0.3108, -0.3418, 0.0411, 0.2203, 0.2187, -0.2990, -0.0416,
0.0209, -0.1024, 0.4443, -0.4420, -0.0330, -0.3591, -0.2990, 0.2167, 0.1395, 0.2317,
0.1318, 0.1909, -0.3615, 0.1953, -0.2582, -0.2217, 0.3723, 0.1458, 0.2630, -0.0377,
0.1754, 0.0800, -0.3964, -0.3247, 0.4219, -0.0900, 0.3553, 0.2614, -0.1298, -0.1124};
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
std::vector<float> input{-0.8432,
-0.9887,
......@@ -2239,11 +2229,6 @@ TEST_CASE(gru_bidirectional_args)
-1.0536,
-0.2529};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
std::vector<float> ih_data{-0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348,
0.4178, 1.0175, 0.9212, -0.0468, 0.5691, -0.0882, 0.8340,
0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
float clip = 0.0f;
// 3 args
......@@ -2288,6 +2273,15 @@ TEST_CASE(gru_bidirectional_args)
// 4 args (bias is used)
{
std::vector<float> bias_data{
-0.1582, -0.0826, 0.4008, 0.0118, 0.2511, 0.1900, -0.2838, 0.2549, -0.2484,
0.2363, -0.4083, -0.0295, -0.1161, 0.1211, 0.2509, -0.1414, -0.2628, -0.2992,
0.1517, 0.1817, -0.2783, 0.3183, -0.1629, -0.3108, -0.3418, 0.0411, 0.2203,
0.2187, -0.2990, -0.0416, 0.0209, -0.1024, 0.4443, -0.4420, -0.0330, -0.3591,
-0.2990, 0.2167, 0.1395, 0.2317, 0.1318, 0.1909, -0.3615, 0.1953, -0.2582,
-0.2217, 0.3723, 0.1458, 0.2630, -0.0377, 0.1754, 0.0800, -0.3964, -0.3247,
0.4219, -0.0900, 0.3553, 0.2614, -0.1298, -0.1124};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_literal(migraphx::literal{in_shape, input});
......@@ -2330,6 +2324,10 @@ TEST_CASE(gru_bidirectional_args)
// 4 args (ih is used)
{
std::vector<float> ih_data{-0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348,
0.4178, 1.0175, 0.9212, -0.0468, 0.5691, -0.0882, 0.8340,
0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_literal(migraphx::literal{in_shape, input});
......@@ -4186,7 +4184,6 @@ TEST_CASE(lstm_bidirectional_var_seq_lens)
-0.83699064, 0.49162736, -0.8271, -0.5683, 0.4562,
-1.2545, 1.2729, -0.4082, -0.4392, -0.9406,
0.7794, 1.8194, -0.5811, 0.2166};
std::vector<int> sl_data{1, 2, 3};
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -4196,10 +4193,11 @@ TEST_CASE(lstm_bidirectional_var_seq_lens)
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::shape ic_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::shape pph_shape{migraphx::shape::float_type, {num_dirct, 3 * hidden_size}};
migraphx::shape sl_shape{migraphx::shape::int32_type, {batch_size}};
// concatenation of hidden states as program output
{
std::vector<int> sl_data{1, 2, 3};
migraphx::shape sl_shape{migraphx::shape::int32_type, {batch_size}};
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_literal(migraphx::literal{in_shape, input_data});
......
#include <migraphx/rewrite_pooling.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/program.hpp>
#include <migraphx/ref/target.hpp>
......@@ -22,7 +23,7 @@ static void opt_pooling(migraphx::module& m)
TEST_CASE(rewrite_pooling_test)
{
migraphx::shape s{migraphx::shape::float_type, {2, 2, 3, 4, 5}};
auto pooling_program = [&](const std::string& mode) {
auto pooling_program = [&](const migraphx::op::pooling_mode mode) {
migraphx::module m;
auto input = m.add_parameter("x", s);
auto ret = m.add_instruction(migraphx::make_op("pooling",
......@@ -46,15 +47,16 @@ TEST_CASE(rewrite_pooling_test)
return m;
};
auto test_rewrite = [&](const std::string& mode, const migraphx::operation& op) {
auto test_rewrite = [&](const migraphx::op::pooling_mode mode, const migraphx::operation& op) {
migraphx::module m1 = pooling_program(mode);
migraphx::module m2 = opt_program(op);
opt_pooling(m1);
EXPECT(m1 == m2);
};
test_rewrite("average", migraphx::make_op("reduce_mean", {{"axes", {1}}}));
test_rewrite("max", migraphx::make_op("reduce_max", {{"axes", {1}}}));
test_rewrite(migraphx::op::pooling_mode::average,
migraphx::make_op("reduce_mean", {{"axes", {1}}}));
test_rewrite(migraphx::op::pooling_mode::max, migraphx::make_op("reduce_max", {{"axes", {1}}}));
}
TEST_CASE(rewrite_avepooling_na1_test)
......@@ -64,12 +66,13 @@ TEST_CASE(rewrite_avepooling_na1_test)
migraphx::module m;
auto input = m.add_parameter("x", s);
auto ret = m.add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{"padding", {0, 1, 0}},
{"stride", {1, 1, 1}},
{"lengths", {3, 4, 5}}}),
input);
auto ret =
m.add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 1, 0}},
{"stride", {1, 1, 1}},
{"lengths", {3, 4, 5}}}),
input);
m.add_return({ret});
return m;
};
......@@ -88,12 +91,13 @@ TEST_CASE(rewrite_avepooling_na2_test)
migraphx::module m;
auto input = m.add_parameter("x", s);
auto ret = m.add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{"padding", {0, 0, 0}},
{"stride", {1, 2, 1}},
{"lengths", {3, 4, 5}}}),
input);
auto ret =
m.add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0}},
{"stride", {1, 2, 1}},
{"lengths", {3, 4, 5}}}),
input);
m.add_return({ret});
return m;
};
......@@ -113,7 +117,7 @@ TEST_CASE(rewrite_avepooling_na3_test)
auto input = m.add_parameter("x", s);
auto ret = m.add_instruction(migraphx::make_op("pooling",
{{"mode", "max"},
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0}},
{"stride", {1, 1, 1}},
{"lengths", {3, 3, 5}}}),
......@@ -135,7 +139,7 @@ TEST_CASE(literal_rewrite_pooling_test)
std::vector<float> data(s.elements());
std::iota(data.begin(), data.end(), 1.0f);
auto pooling_program = [&](const std::string& mode) {
auto pooling_program = [&](const migraphx::op::pooling_mode mode) {
migraphx::program p;
auto* mm = p.get_main_module();
......@@ -163,7 +167,8 @@ TEST_CASE(literal_rewrite_pooling_test)
return p;
};
auto test_rewrite_pooling = [&](const std::string& mode, const migraphx::operation& op) {
auto test_rewrite_pooling = [&](const migraphx::op::pooling_mode mode,
const migraphx::operation& op) {
migraphx::program p1 = pooling_program(mode);
migraphx::program p2 = opt_program(op);
p1.compile(migraphx::ref::target{});
......@@ -174,8 +179,10 @@ TEST_CASE(literal_rewrite_pooling_test)
result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
};
test_rewrite_pooling("max", migraphx::make_op("reduce_max", {{"axes", {1}}}));
test_rewrite_pooling("average", migraphx::make_op("reduce_mean", {{"axes", {1}}}));
test_rewrite_pooling(migraphx::op::pooling_mode::max,
migraphx::make_op("reduce_max", {{"axes", {1}}}));
test_rewrite_pooling(migraphx::op::pooling_mode::average,
migraphx::make_op("reduce_mean", {{"axes", {1}}}));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
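The hunks above replace the string pooling modes ("average", "max") with the migraphx::op::pooling_mode enum when constructing the pooling operator. A minimal standalone sketch of the new construction pattern, grounded in the calls shown in this diff (the helper name build_avg_pool and the 1x3x16x16 shape are illustrative, not part of the commit):

#include <migraphx/program.hpp>
#include <migraphx/shape.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/pooling.hpp>

// Build a small module that average-pools a 1x3x16x16 input using the
// enum-based "mode" attribute instead of the old mode string.
inline migraphx::program build_avg_pool()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto x   = mm->add_parameter(
        "x", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    mm->add_instruction(
        migraphx::make_op("pooling",
                          {{"mode", migraphx::op::pooling_mode::average},
                           {"padding", {0, 0}},
                           {"stride", {2, 2}},
                           {"lengths", {2, 2}}}),
        x);
    return p;
}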
......@@ -48,7 +48,7 @@ TEST_CASE(as_json)
TEST_CASE(as_file)
{
std::string filename = "migraphx_program.dat";
std::string filename = "migraphx_program.mxr";
migraphx::program p1 = create_program();
migraphx::save(p1, filename);
migraphx::program p2 = migraphx::load(filename);
......
......@@ -608,4 +608,15 @@ TEST_CASE(cpp_type_name)
EXPECT(test::throws([&] { migraphx::shape::cpp_type(migraphx::shape::tuple_type); }));
}
TEST_CASE(test_with_type)
{
migraphx::shape s{migraphx::shape::float_type, {2, 2}, {1, 0}};
EXPECT(s.type() == migraphx::shape::float_type);
auto new_s = s.with_type(migraphx::shape::half_type);
EXPECT(s.type() == migraphx::shape::float_type);
EXPECT(s.type() != new_s.type());
EXPECT(s.lens() == new_s.lens());
EXPECT(s.strides() == new_s.strides());
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -4,6 +4,7 @@
#include <migraphx/instruction.hpp>
#include <test.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/matcher.hpp>
......@@ -462,14 +463,15 @@ TEST_CASE(conv_pooling_dot)
d1);
auto bc1 = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 1280, 7, 7}}}), d2);
auto a1 = m1.add_instruction(migraphx::make_op("add"), c1, bc1);
auto ap = m1.add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {7, 7}},
{"ceil_mode", 0}}),
a1);
auto a1 = m1.add_instruction(migraphx::make_op("add"), c1, bc1);
auto ap =
m1.add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {7, 7}},
{"ceil_mode", 0}}),
a1);
auto fl = m1.add_instruction(migraphx::make_op("flatten", {{"axis", 1}}), ap);
auto q4 = add_quantize_op(m1, "quantizelinear", fl, scale, zero);
auto d8 = add_quantize_op(m1, "dequantizelinear", q4, scale, zero);
......@@ -508,14 +510,15 @@ TEST_CASE(conv_pooling_dot)
auto d5 = add_quantize_op(m2, "dequantizelinear", c1, scale1);
auto bc1 = m2.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 1280, 7, 7}}}), d2);
auto a1 = m2.add_instruction(migraphx::make_op("add"), d5, bc1);
auto ap = m2.add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {7, 7}},
{"ceil_mode", 0}}),
a1);
auto a1 = m2.add_instruction(migraphx::make_op("add"), d5, bc1);
auto ap =
m2.add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {7, 7}},
{"ceil_mode", 0}}),
a1);
auto fl = m2.add_instruction(migraphx::make_op("flatten", {{"axis", 1}}), ap);
auto q4 = add_quantize_op(m2, "quantizelinear", fl, scale, zero);
auto dot = m2.add_instruction(migraphx::make_op("quant_dot"), q4, db);
......@@ -564,16 +567,17 @@ TEST_CASE(mobilenet_snippet)
d1);
auto bc1 = mm.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 1280, 7, 7}}}), d2);
auto a1 = mm.add_instruction(migraphx::make_op("add"), c1, bc1);
auto q2 = add_quantize_op(mm, "quantizelinear", a1, scale, zero);
auto d6 = add_quantize_op(mm, "dequantizelinear", q2, scale, zero);
auto ap = mm.add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {7, 7}},
{"ceil_mode", 0}}),
d6);
auto a1 = mm.add_instruction(migraphx::make_op("add"), c1, bc1);
auto q2 = add_quantize_op(mm, "quantizelinear", a1, scale, zero);
auto d6 = add_quantize_op(mm, "dequantizelinear", q2, scale, zero);
auto ap =
mm.add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {7, 7}},
{"ceil_mode", 0}}),
d6);
auto q3 = add_quantize_op(mm, "quantizelinear", ap, scale, zero);
auto d7 = add_quantize_op(mm, "dequantizelinear", q3, scale, zero);
auto rs = mm.add_instruction(migraphx::make_op("reshape", {{"dims", {1, -1}}}), d7);
......
......@@ -12,6 +12,11 @@ TEST_CASE(make_target)
}
}
TEST_CASE(make_invalid_target)
{
EXPECT(test::throws([&] { migraphx::make_target("mi100"); }));
}
TEST_CASE(targets)
{
auto ts = migraphx::get_targets();
......
......@@ -649,8 +649,8 @@ TEST_CASE(pooling_test)
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
migraphx::op::pooling avg_pool_op{"average"};
migraphx::op::pooling max_pool_op{"max"};
migraphx::op::pooling avg_pool_op{migraphx::op::pooling_mode::average};
migraphx::op::pooling max_pool_op{migraphx::op::pooling_mode::max};
avg_pool_op.stride = {2, 2};
max_pool_op.stride = {2, 2};
avg_pool_op.lengths = {2, 2};
......
......@@ -57,6 +57,15 @@ TEST_CASE(value_construct_string)
EXPECT(v.get_key().empty());
}
TEST_CASE(value_construct_key_string_literal_pair)
{
// Use parens instead of {} to test the key-pair constructor
migraphx::value v("key", "one");
EXPECT(v.is_string());
EXPECT(v.get_string() == "one");
EXPECT(v.get_key() == "key");
}
TEST_CASE(value_construct_float)
{
migraphx::value v = 1.0;
......@@ -167,6 +176,15 @@ TEST_CASE(value_copy_assign_keyless)
EXPECT(v1.without_key() == v2.without_key());
}
TEST_CASE(value_assign_key_string_literal_pair)
{
migraphx::value v = migraphx::value::object{};
v["key"] = "one";
EXPECT(v["key"].is_string());
EXPECT(v["key"].get_string() == "one");
EXPECT(v["key"].get_key() == "key");
}
TEST_CASE(value_construct_array)
{
migraphx::value v = {1, 2, 3};
......@@ -522,6 +540,14 @@ TEST_CASE(value_construct_object_string_mixed_value)
EXPECT(v.at("two").get_int64() == 2);
}
template <class Expression>
auto compare_predicate(const Expression& e)
{
bool result = e.value();
return test::make_predicate(test::as_string(e) + " => " + test::as_string(result),
[=] { return result; });
}
TEST_CASE(value_compare)
{
EXPECT(migraphx::value(1) == migraphx::value(1));
......@@ -535,6 +561,46 @@ TEST_CASE(value_compare)
EXPECT(migraphx::value(2) > migraphx::value(1));
EXPECT(migraphx::value(2) >= migraphx::value(1));
EXPECT(migraphx::value(1) >= migraphx::value(1));
EXPECT(migraphx::value(1) != migraphx::value("1"));
EXPECT(migraphx::value(1) != migraphx::value());
}
// NOLINTNEXTLINE
#define MIGRAPHX_VALUE_TEST_COMPARE(...) compare_predicate(TEST_CAPTURE(__VA_ARGS__))
// NOLINTNEXTLINE
#define EXPECT_TOTALLY_ORDERED_IMPL(_, x, y) \
EXPECT(_(x <= y) or _(x >= y)); \
EXPECT(_(x < y) or _(x > y) or _(x == y)); \
EXPECT((_(x < y) or _(x > y)) == _(x != y)); \
EXPECT(_(x < y) == _(y > x)); \
EXPECT(_(x <= y) == _(y >= x)); \
EXPECT(_(x < y) != _(x >= y)); \
EXPECT(_(x > y) != _(x <= y)); \
EXPECT(_(x == y) != _(x != y))
// NOLINTNEXTLINE
#define EXPECT_TOTALLY_ORDERED(x, y) \
EXPECT_TOTALLY_ORDERED_IMPL(MIGRAPHX_VALUE_TEST_COMPARE, x, y); \
EXPECT_TOTALLY_ORDERED_IMPL(MIGRAPHX_VALUE_TEST_COMPARE, y, x)
// NOLINTNEXTLINE(readability-function-size)
TEST_CASE(value_compare_ordered)
{
EXPECT_TOTALLY_ORDERED(migraphx::value(), migraphx::value());
EXPECT_TOTALLY_ORDERED(migraphx::value(1), migraphx::value(1));
EXPECT_TOTALLY_ORDERED(migraphx::value(1), migraphx::value(2));
EXPECT_TOTALLY_ORDERED(migraphx::value("key", 1), migraphx::value("key", 1));
EXPECT_TOTALLY_ORDERED(migraphx::value("key1", 1), migraphx::value("key2", 2));
EXPECT_TOTALLY_ORDERED(migraphx::value("key", 1), migraphx::value("key", 2));
EXPECT_TOTALLY_ORDERED(migraphx::value("key1", 1), migraphx::value("key2", 2));
EXPECT_TOTALLY_ORDERED(migraphx::value("key", 1), migraphx::value("key", "2"));
EXPECT_TOTALLY_ORDERED(migraphx::value("key1", 1), migraphx::value("key2", "2"));
EXPECT_TOTALLY_ORDERED(migraphx::value(std::int64_t{1}), migraphx::value(std::uint64_t{1}));
EXPECT_TOTALLY_ORDERED(migraphx::value(std::int64_t{1}), migraphx::value(std::uint64_t{2}));
EXPECT_TOTALLY_ORDERED(migraphx::value(std::int64_t{2}), migraphx::value(std::uint64_t{1}));
EXPECT_TOTALLY_ORDERED(migraphx::value(1), migraphx::value("1"));
EXPECT_TOTALLY_ORDERED(migraphx::value(1), migraphx::value());
}
TEST_CASE(value_to_from_string)
......@@ -835,4 +901,38 @@ TEST_CASE(value_or_null)
EXPECT(v.value_or(3) == 3);
}
TEST_CASE(value_get_default)
{
migraphx::value v = {{"key", 1}};
EXPECT(v.get("key", 3) == 1);
EXPECT(v.get("missing", 3) == 3);
}
TEST_CASE(value_get_default_vector)
{
std::vector<int> ints = {1, 2, 3};
std::vector<int> fallback = {-1};
migraphx::value v = {{"key", ints}};
EXPECT(v.get("key", fallback) == ints);
EXPECT(v.get("missing", fallback) == fallback);
EXPECT(v.get("missing", {-1}) == fallback);
}
TEST_CASE(value_get_default_string_literal)
{
migraphx::value v = {{"key", "hello"}};
EXPECT(v.get("key", "none") == "hello");
EXPECT(v.get("missing", "none") == "none");
}
TEST_CASE(value_get_default_string_literal_vector)
{
std::vector<std::string> strings = {"1", "2", "3"};
std::vector<std::string> fallback = {"none"};
migraphx::value v = {{"key", strings}};
EXPECT(v.get("key", fallback) == strings);
EXPECT(v.get("missing", fallback) == fallback);
EXPECT(v.get("missing", {"none"}) == fallback);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
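The new value tests above exercise the ("key", string-literal) pair constructor and get() with a fallback default. A short usage sketch of those two behaviours, assuming <migraphx/value.hpp> declares migraphx::value as used in the diff:

#include <cassert>
#include <migraphx/value.hpp>

int main()
{
    // Parens (not braces) select the key/string-literal pair constructor.
    migraphx::value v("key", "one");
    assert(v.is_string());
    assert(v.get_string() == "one");
    assert(v.get_key() == "key");

    // get() returns the stored value when the key exists and the supplied
    // default when it does not.
    migraphx::value obj = {{"answer", 1}};
    assert(obj.get("answer", 3) == 1);
    assert(obj.get("missing", 3) == 3);
    return 0;
}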
file(GLOB VERIFY_TESTS *.cpp)
file(GLOB VERIFY_TESTS ${CONFIGURE_DEPENDS} *.cpp)
add_executable(test_verify ${VERIFY_TESTS})
add_dependencies(tests test_verify)
......
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/apply_alpha_beta.hpp>
struct gemm_add : verify_program<gemm_add>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape m1_shape{migraphx::shape::float_type, {1, 2, 3}};
migraphx::shape m2_shape{migraphx::shape::float_type, {1, 3, 4}};
migraphx::shape m3_shape{migraphx::shape::float_type, {1, 2, 4}};
auto l1 = mm->add_parameter("1", m1_shape);
auto l2 = mm->add_parameter("2", m2_shape);
auto l3 = mm->add_parameter("3", m3_shape);
auto dot = mm->add_instruction(migraphx::make_op("dot"), l1, l2);
mm->add_instruction(migraphx::make_op("add"), dot, l3);
return p;
}
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/op/quant_convolution.hpp>
struct quant_conv_int8x4_default : verify_program<quant_conv_int8x4_default>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape a_shape{migraphx::shape::int8_type, {16, 16, 4, 4}};
auto pa = mm->add_parameter("a", a_shape);
migraphx::shape c_shape{migraphx::shape::int8_type, {16, 16, 3, 3}};
auto pc = mm->add_parameter("c", c_shape);
mm->add_instruction(
migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
pa,
pc);
return p;
}
};
......@@ -128,8 +128,7 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
std::future<std::pair<migraphx::program, std::vector<migraphx::argument>>>;
auto_print::set_terminate_handler(name);
if(migraphx::enabled(MIGRAPHX_DUMP_TEST{}))
migraphx::save(p, name + ".mx");
std::vector<std::pair<std::string, result_future>> results;
migraphx::save(p, name + ".mxr");
std::vector<std::string> target_names;
for(const auto& tname : migraphx::get_targets())
{
......@@ -145,6 +144,7 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
}
if(not target_names.empty())
{
std::vector<std::pair<std::string, result_future>> results;
migraphx::parameter_map m;
for(auto&& x : p.get_parameter_shapes())
{
......
......@@ -12,7 +12,7 @@ struct test_avg_pooling_1d : verify_program<test_avg_pooling_1d>
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 3, 5}});
auto op = migraphx::op::pooling{"average", {0}, {1}, {3}};
auto op = migraphx::op::pooling{migraphx::op::pooling_mode::average, {0}, {1}, {3}};
mm->add_instruction(op, input);
return p;
}
......
......@@ -12,7 +12,8 @@ struct test_avg_pooling_3d : verify_program<test_avg_pooling_3d>
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 3, 5, 5, 5}});
auto op = migraphx::op::pooling{"average", {1, 1, 1}, {3, 3, 3}, {3, 3, 3}};
auto op = migraphx::op::pooling{
migraphx::op::pooling_mode::average, {1, 1, 1}, {3, 3, 3}, {3, 3, 3}};
mm->add_instruction(op, input);
return p;
}
......
......@@ -12,7 +12,8 @@ struct test_avg_pooling_3d_opt : verify_program<test_avg_pooling_3d_opt>
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 2, 3, 3, 3}});
auto op = migraphx::op::pooling{"average", {0, 0, 0}, {1, 1, 1}, {3, 3, 3}};
auto op = migraphx::op::pooling{
migraphx::op::pooling_mode::average, {0, 0, 0}, {1, 1, 1}, {3, 3, 3}};
mm->add_instruction(op, input);
return p;
}
......
......@@ -10,9 +10,11 @@ struct test_avg_pooling_ceil_3d : verify_program<test_avg_pooling_ceil_3d>
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 3, 5, 5, 5}});
auto op = migraphx::op::pooling{"average", {1, 1, 1}, {3, 3, 3}, {3, 3, 3}, true};
auto op = migraphx::op::pooling{
migraphx::op::pooling_mode::average, {1, 1, 1}, {3, 3, 3}, {3, 3, 3}, true};
mm->add_instruction(op, input);
return p;
}
......
......@@ -3,6 +3,7 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/pooling.hpp>
struct test_concat_pooling : verify_program<test_concat_pooling>
{
......@@ -18,12 +19,13 @@ struct test_concat_pooling : verify_program<test_concat_pooling>
auto concat_t = mm->add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 3, 1, 2}}}), concat);
auto pooling = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{"padding", {0, 0}},
{"stride", {1, 1}},
{"lengths", {8, 8}}}),
concat_t);
auto pooling =
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0}},
{"stride", {1, 1}},
{"lengths", {8, 8}}}),
concat_t);
mm->add_instruction(migraphx::make_op("relu"), pooling);
return p;
}
......
......@@ -12,7 +12,6 @@ struct test_conv_bias_clipped_relu : verify_program<test_conv_bias_clipped_relu>
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<size_t> input_lens{4, 3, 3, 3};
auto input =
mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
auto weights =
......
......@@ -3,6 +3,7 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
struct test_conv_bn_relu_pooling : verify_program<test_conv_bn_relu_pooling>
{
......@@ -29,7 +30,7 @@ struct test_conv_bn_relu_pooling : verify_program<test_conv_bn_relu_pooling>
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
auto relu = mm->add_instruction(migraphx::make_op("relu"), bn);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1}},
{"stride", {2, 2}},
{"lengths", {3, 3}}}),
......
......@@ -3,6 +3,7 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
{
......@@ -47,7 +48,7 @@ struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
auto add = mm->add_instruction(migraphx::make_op("add"), bn1, bn2);
auto relu = mm->add_instruction(migraphx::make_op("relu"), add);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", "average"},
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1}},
{"stride", {2, 2}},
{"lengths", {3, 3}}}),
......