Commit 5af9aac0 authored by charlie

Merge branch 'dyn_batch_pass' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_test_runner

parents 7b2516e0 05e81ed3
trilu_out_k_test.onnx: new binary ONNX protobuf (Trilu node; inputs x, k; output y)
trilu_row_one_test.onnx: new binary ONNX protobuf (Trilu node; inputs x, k; output y)
trilu_test.onnx: new binary ONNX protobuf (Trilu node; input x; output y)
......@@ -590,17 +590,80 @@ TEST_CASE(if_else_test)
p.compile(migraphx::ref::target{});
migraphx::shape s_data{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
migraphx::shape bool_data{migraphx::shape::bool_type, {1}};
bool b_data = false;
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_data, data.data());
pp["y"] = migraphx::argument(s_data, data.data());
pp["cond"] = migraphx::argument(bool_data, &b_data);
auto result = p.eval(pp).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0866565, -0.371067, 0.017719, 0.0250614, 0.0612539, -0.744683};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(if_else_test_inlined)
{
migraphx::program p = migraphx::parse_onnx("if_else_test_inlined.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape s_data{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_data, data.data());
pp["y"] = migraphx::argument(s_data, data.data());
auto result = p.eval(pp).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0507132, -0.712328, 0.0105797, 0.04569, 0.0185013, -1.16472};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(if_then_test)
{
migraphx::program p = migraphx::parse_onnx("if_then_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape s_data{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
migraphx::shape bool_data{migraphx::shape::bool_type, {1}};
bool b_data = true;
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_data, data.data());
pp["y"] = migraphx::argument(s_data, data.data());
pp["cond"] = migraphx::argument(bool_data, &b_data);
auto result = p.eval(pp).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
// the onnx model's then-branch adds ones, so the result should be just the input + 1.0
std::vector<float> gold = {1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(if_then_test_inlined)
{
migraphx::program p = migraphx::parse_onnx("if_then_test_inlined.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape s_data{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_data, data.data());
pp["y"] = migraphx::argument(s_data, data.data());
auto result = p.eval(pp).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375};
EXPECT(migraphx::verify_range(result_vector, gold));
}
......@@ -637,6 +700,67 @@ TEST_CASE(if_literal_test)
}
}
TEST_CASE(if_then_else_multi_output_shapes_inlined_test)
{
migraphx::program p =
migraphx::parse_onnx("if_then_else_multi_output_shapes_inlined_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape x_data{migraphx::shape::float_type, {2, 3, 1}};
migraphx::shape y_data{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(x_data, data.data());
pp["y"] = migraphx::argument(y_data, data.data());
auto result_args = p.eval(pp);
auto result = result_args.front();
auto result_b = result_args.back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> result_vector_back;
result_b.visit([&](auto output) { result_vector_back.assign(output.begin(), output.end()); });
result_vector.insert(result_vector.end(), result_vector_back.begin(), result_vector_back.end());
std::vector<float> gold = {
1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375, 0.125, 1.50, -0.125, 0.250, -0.250, -1.125};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(if_then_else_multi_output_shapes_test)
{
migraphx::program p = migraphx::parse_onnx("if_then_else_multi_output_shapes_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape s_data{migraphx::shape::float_type, {2, 3, 1}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
migraphx::shape bool_data{migraphx::shape::bool_type, {1}};
bool b_data = true;
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s_data, data.data());
pp["y"] = migraphx::argument(s_data, data.data());
pp["cond"] = migraphx::argument(bool_data, &b_data);
auto result_args = p.eval(pp);
auto result = result_args.front();
auto result_b = result_args.back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> result_vector_back;
result_b.visit([&](auto output) { result_vector_back.assign(output.begin(), output.end()); });
result_vector.insert(result_vector.end(), result_vector_back.begin(), result_vector_back.end());
std::vector<float> gold = {
1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375, 0.125, 1.50, -0.125, 0.250, -0.250, -1.125};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(if_pl_test)
{
auto run_prog = [](bool cond) {
......@@ -1458,4 +1582,73 @@ TEST_CASE(where_test)
EXPECT(migraphx::verify_range(result_vector, gold));
}
std::vector<float> gen_trilu_test(const migraphx::shape& s, const migraphx::program& p)
{
// input data filled with values 1 to nelements
std::vector<float> x_data(s.elements());
std::iota(x_data.begin(), x_data.end(), 1);
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s, x_data.data());
auto result = p.eval(pp).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
return result_vector;
}
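// Editor's sketch, not part of the original test file: the gold vectors in the trilu tests
// below follow the usual Trilu rule, where element (i, j) of each innermost matrix is kept
// when j - i >= k (upper) or j - i <= k (lower) and zeroed otherwise. trilu_reference is a
// hypothetical helper that only illustrates that rule for a single row-major matrix.
inline std::vector<float> trilu_reference(
    const std::vector<float>& in, std::size_t rows, std::size_t cols, int64_t k, bool upper)
{
    std::vector<float> out(in.size(), 0.0f);
    for(std::size_t i = 0; i < rows; ++i)
    {
        for(std::size_t j = 0; j < cols; ++j)
        {
            const int64_t offset = static_cast<int64_t>(j) - static_cast<int64_t>(i);
            // keep the element when it lies on the requested side of diagonal k
            if(upper ? offset >= k : offset <= k)
                out[i * cols + j] = in[i * cols + j];
        }
    }
    return out;
}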
TEST_CASE(trilu_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold = {1, 2, 3, 4, 0, 6, 7, 8, 0, 0, 11, 12};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(trilu_batch_diff_k_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_batch_diff_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {2, 2, 3}}, p);
std::vector<float> gold = {0, 0, 3, 0, 0, 0, 0, 0, 9, 0, 0, 0};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(trilu_lower_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_lower_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold = {0, 0, 0, 0, 5, 0, 0, 0, 9, 10, 0, 0};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(trilu_out_k_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_out_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold(12, 0);
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(trilu_row_one_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_row_one_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {1, 4}}, p);
std::vector<float> gold = {0, 2, 3, 4};
EXPECT(migraphx::verify_range(result_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -831,6 +831,77 @@ TEST_CASE(gather)
}
}
TEST_CASE(gather_dyn0)
{
// Insert dynamic index into dynamic shape
migraphx::shape input{migraphx::shape::float_type,
{{2, 3, 2}, {3, 4, 3}, {6, 9, 7}, {12, 14, 13}}};
migraphx::shape indices{migraphx::shape::int32_type, {{2, 7, 3}, {3, 3, 0}}};
int axis = 1;
expect_shape(migraphx::shape{migraphx::shape::float_type,
{{2, 3, 2}, {2, 7, 3}, {3, 3, 0}, {6, 9, 7}, {12, 14, 13}}},
migraphx::make_op("gather", {{"axis", axis}}),
input,
indices);
}
TEST_CASE(gather_dyn1)
{
// Insert static index into dynamic shape
migraphx::shape input{migraphx::shape::float_type,
{{2, 3, 2}, {3, 4, 3}, {6, 9, 7}, {12, 14, 13}}};
migraphx::shape indices{migraphx::shape::int32_type, {2, 3}};
int axis = 1;
expect_shape(migraphx::shape{migraphx::shape::float_type,
{{2, 3, 2}, {2, 2, 0}, {3, 3, 0}, {6, 9, 7}, {12, 14, 13}}},
migraphx::make_op("gather", {{"axis", axis}}),
input,
indices);
}
TEST_CASE(gather_dyn2)
{
// Insert scalar (static) index into dynamic shape
migraphx::shape input{migraphx::shape::float_type,
{{2, 3, 2}, {3, 4, 3}, {6, 9, 7}, {12, 14, 13}}};
std::vector<std::size_t> mins;
std::vector<std::size_t> maxes;
std::vector<std::size_t> opts;
migraphx::shape indices{migraphx::shape::int32_type, mins, maxes, opts};
int axis = 1;
expect_shape(migraphx::shape{migraphx::shape::float_type, {{2, 3, 2}, {6, 9, 7}, {12, 14, 13}}},
migraphx::make_op("gather", {{"axis", axis}}),
input,
indices);
}
TEST_CASE(gather_dyn3)
{
// Insert dynamic index into static shape, axis 1
migraphx::shape input{migraphx::shape::float_type, {2, 3, 6, 12}};
migraphx::shape indices{migraphx::shape::int32_type, {{2, 3, 2}, {3, 4, 3}}};
int axis = 1;
expect_shape(migraphx::shape{migraphx::shape::float_type,
{{2, 2, 0}, {2, 3, 2}, {3, 4, 3}, {6, 6, 0}, {12, 12, 0}}},
migraphx::make_op("gather", {{"axis", axis}}),
input,
indices);
}
TEST_CASE(gather_dyn4)
{
// Insert dynamic index into static shape, axis 0
migraphx::shape input{migraphx::shape::float_type, {2, 3, 6, 12}};
migraphx::shape indices{migraphx::shape::int32_type, {{2, 3, 2}, {3, 4, 3}}};
int axis = 0;
expect_shape(migraphx::shape{migraphx::shape::float_type,
{{2, 3, 2}, {3, 4, 3}, {3, 3, 0}, {6, 6, 0}, {12, 12, 0}}},
migraphx::make_op("gather", {{"axis", axis}}),
input,
indices);
}
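// Editor's sketch, not from the original file: the expected shapes in the gather_dyn cases
// above follow one rule, namely that the output dimensions are the data dimensions with the
// dimension at `axis` replaced by all of the index dimensions (a scalar index simply removes
// that dimension, and static index lens are treated as fixed dynamic_dimensions).
// gather_expected_dims is a hypothetical helper spelling that out.
inline std::vector<migraphx::shape::dynamic_dimension>
gather_expected_dims(std::vector<migraphx::shape::dynamic_dimension> data_dims,
                     const std::vector<migraphx::shape::dynamic_dimension>& index_dims,
                     std::size_t axis)
{
    // drop the gathered axis, then splice the index dimensions into its place
    data_dims.erase(data_dims.begin() + axis);
    data_dims.insert(data_dims.begin() + axis, index_dims.begin(), index_dims.end());
    return data_dims;
}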
TEST_CASE(get_tuple_elem_test)
{
migraphx::shape s0{migraphx::shape::bool_type, {1, 1}};
......@@ -2290,6 +2361,18 @@ TEST_CASE(rnn)
}
}
TEST_CASE(select_module_dyn)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {3, 3}, {255, 255}, {255, 255}}};
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
expect_shape(
out_attr,
migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
input);
}
TEST_CASE(slice_shape)
{
migraphx::shape input{migraphx::shape::int32_type, {2, 2, 3}};
......@@ -2303,6 +2386,70 @@ TEST_CASE(slice_shape)
expect_shape(migraphx::shape{migraphx::shape::int32_type, {2, 2, 1}, {6, 3, 1}},
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {2}}, {"ends", {10}}}),
input);
expect_shape(migraphx::shape{migraphx::shape::int32_type, {2, 2, 1}, {6, 3, 1}},
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {-1}}, {"ends", {10}}}),
input);
}
TEST_CASE(slice_dyn_shape0)
{
migraphx::shape input{migraphx::shape::int32_type, {{2, 3, 0}, {7, 7, 0}, {2, 3, 0}}};
// Slice axis 1 to size 4-1=3
expect_shape(migraphx::shape{migraphx::shape::int32_type, {{2, 3, 0}, {3, 3, 0}, {2, 3, 0}}},
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {1}}, {"ends", {4}}}),
input);
}
TEST_CASE(slice_dyn_shape1)
{
migraphx::shape input{migraphx::shape::int32_type, {{2, 3, 0}, {7, 7, 0}, {2, 3, 0}}};
// Slice axis 1 with negative index
expect_shape(migraphx::shape{migraphx::shape::int32_type, {{2, 3, 0}, {2, 2, 0}, {2, 3, 0}}},
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {1}}, {"ends", {-4}}}),
input);
}
TEST_CASE(slice_dyn_shape2)
{
migraphx::shape input{migraphx::shape::int32_type, {{2, 3, 0}, {7, 7, 0}, {2, 3, 0}}};
// Sliced range end is larger than the dimension; it is clipped
expect_shape(migraphx::shape{migraphx::shape::int32_type, {{2, 3, 0}, {6, 6, 0}, {2, 3, 0}}},
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {1}}, {"ends", {10}}}),
input);
}
TEST_CASE(slice_dyn_shape3)
{
// TODO: when slicing a non-fixed (variable) dynamic dimension is allowed, slice to a size
// smaller than min as in the commented-out check below. Until then, this action is an error.
migraphx::shape input{migraphx::shape::int32_type, {{2, 3, 0}, {7, 8, 0}, {2, 3, 0}}};
throws_shape(migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {1}}}),
input);
// clang-format off
// expect_shape(migraphx::shape{migraphx::shape::int32_type, {{2, 3, 0}, {1, 1, 0}, {2, 3, 0}}},
// migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {1}}}),
// input);
// clang-format on
}
TEST_CASE(slice_dyn_shape4)
{
migraphx::shape input{migraphx::shape::int32_type, {{2, 2, 0}, {7, 7, 0}, {2, 3, 0}}};
// Slice multiple axes: axis 0 to size 2-1=1 and axis 1 to size 4-1=3
expect_shape(
migraphx::shape{migraphx::shape::int32_type, {{1, 1, 0}, {3, 3, 0}, {2, 3, 0}}},
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {1, 1}}, {"ends", {2, 4}}}),
input);
}
TEST_CASE(slice_dyn_shape5)
{
// Axis out of range.
migraphx::shape input{migraphx::shape::int32_type, {{2, 2, 0}, {7, 7, 0}, {2, 3, 0}}};
throws_shape(
migraphx::make_op("slice", {{"axes", {0, 20}}, {"starts", {1, 1}}, {"ends", {2, 4}}}),
input);
}
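// Editor's sketch, not from the original file: the sliced sizes checked above come from the
// usual normalization of starts/ends before the length is taken; sliced_len is a hypothetical
// helper illustrating that for a single fixed dimension.
inline std::size_t sliced_len(int64_t start, int64_t end, std::size_t dim)
{
    auto clamp = [&](int64_t v) -> int64_t {
        if(v < 0)
            v += static_cast<int64_t>(dim); // negative indices count from the end of the axis
        if(v < 0)
            v = 0;
        if(v > static_cast<int64_t>(dim))
            v = static_cast<int64_t>(dim); // ends past the dimension are clipped
        return v;
    };
    const int64_t len = clamp(end) - clamp(start);
    return len > 0 ? static_cast<std::size_t>(len) : 0;
}
// e.g. sliced_len(1, 4, 7) == 3, sliced_len(1, -4, 7) == 2 and sliced_len(1, 10, 7) == 6,
// matching slice_dyn_shape0/1/2 above.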
TEST_CASE(softmax) { test_softmax_variations<migraphx::op::softmax>(); }
......@@ -2406,27 +2553,359 @@ TEST_CASE(test_scalar_nelemnts)
throws_shape(migraphx::make_op("scalar", {{"scalar_bcst_dims", {2, 3, 4, 5}}}), input);
}
TEST_CASE(test_gathernd)
{
{
// k > r
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 4}};
migraphx::shape ds{dtype, {8}};
int batch_dims(1);
throws_shape(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
{
// k > r - batch_dims
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 4}};
migraphx::shape ds{dtype, {2}};
int batch_dims(1);
throws_shape(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
{
// batch_dims >= r
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 1}};
migraphx::shape ds{dtype, {2, 5, 6, 7}};
int batch_dims(3);
throws_shape(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
{
// int(q) + r - k - batch_dims - 1 = 0 => returns a scalar
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {1}};
migraphx::shape ds{dtype, {2}};
migraphx::shape s0{dtype, {1}};
expect_shape(s0, migraphx::make_op("gathernd"), ds, is);
}
{
// See Example 4 at https://github.com/onnx/onnx/blob/main/docs/Operators.md#GatherND
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 2}};
migraphx::shape ds{dtype, {2, 2}};
migraphx::shape s0{dtype, {2}};
expect_shape(s0, migraphx::make_op("gathernd"), ds, is);
}
{
// See Example 5 at https://github.com/onnx/onnx/blob/main/docs/Operators.md#GatherND
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 1}};
migraphx::shape ds{dtype, {2, 2, 2}};
int batch_dims(1);
migraphx::shape s0{dtype, {2, 2}};
expect_shape(s0, migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
}
}
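// Editor's sketch, not from the original file: the gathernd cases above and the dynamic ones
// below follow the ONNX GatherND shape rule. With q = indices rank, r = data rank and
// k = indices.lens().back(), the output rank is q + r - k - batch_dims - 1 and its dimensions
// are indices.lens[0 : q-1] followed by data.lens[batch_dims + k : r]; rank 0 is the scalar
// case, shown as {1} in these tests. gathernd_expected_lens is a hypothetical helper for
// static shapes only.
inline std::vector<std::size_t> gathernd_expected_lens(const std::vector<std::size_t>& data_lens,
                                                       const std::vector<std::size_t>& index_lens,
                                                       std::size_t batch_dims = 0)
{
    const std::size_t k = index_lens.back();
    std::vector<std::size_t> out(index_lens.begin(), index_lens.end() - 1);
    out.insert(out.end(), data_lens.begin() + batch_dims + k, data_lens.end());
    return out; // an empty result corresponds to the scalar output
}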
TEST_CASE(test_gathernd_dynamic0)
{
// k > r
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 4}};
std::vector<migraphx::shape::dynamic_dimension> b{{8, 8, 0}};
migraphx::shape ds{dtype, b};
int batch_dims(1);
throws_shape(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
TEST_CASE(test_gathernd_dynamic1)
{
// k > r - batch_dims
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 4}};
std::vector<migraphx::shape::dynamic_dimension> b{{2, 2, 0}};
migraphx::shape ds{dtype, b};
int batch_dims(1);
throws_shape(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
TEST_CASE(test_gathernd_dynamic2)
{
// batch_dims >= r
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 1}};
migraphx::shape ds{dtype, {{2, 3, 3}, {5, 6, 5}, {6, 9, 7}, {7, 8, 8}}};
int batch_dims(3);
throws_shape(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
TEST_CASE(test_gathernd_dynamic3)
{
// int(q) + r - k - batch_dims - 1 = 0 => returns a scalar
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {1}};
std::vector<migraphx::shape::dynamic_dimension> b{{2, 2, 0}};
migraphx::shape ds{dtype, b};
migraphx::shape::dynamic_dimension ddout{1, 1, 0};
migraphx::shape s0{dtype, {ddout}};
expect_shape(s0, migraphx::make_op("gathernd"), ds, is);
}
TEST_CASE(test_gathernd_dynamic4)
{
// See Example 1 at https://github.com/onnx/onnx/blob/main/docs/Operators.md#GatherND
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 2}};
std::vector<migraphx::shape::dynamic_dimension> b{{2, 2, 0}, {2, 2, 0}};
migraphx::shape ds{dtype, b};
migraphx::shape::dynamic_dimension ddout{2, 2, 0};
migraphx::shape s0{dtype, {ddout}};
expect_shape(s0, migraphx::make_op("gathernd"), ds, is);
}
TEST_CASE(test_gathernd_dynamic5)
{
// See Example 5 at https://github.com/onnx/onnx/blob/main/docs/Operators.md#GatherND
// index static shape, data dynamic
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 1}};
std::vector<migraphx::shape::dynamic_dimension> b{{2, 2, 0}, {2, 2, 0}, {2, 2, 0}};
migraphx::shape ds{dtype, b};
std::vector<migraphx::shape::dynamic_dimension> ddout{{2, 2, 0}, {2, 2, 0}};
int batch_dims(1);
migraphx::shape s0{dtype, {ddout}};
expect_shape(s0, migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
TEST_CASE(test_gathernd_dynamic6)
{
// See Example 5 at https://github.com/onnx/onnx/blob/main/docs/Operators.md#GatherND
// index dynamic shape, data static
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
std::vector<migraphx::shape::dynamic_dimension> b{{2, 3, 0}, {1, 1, 0}};
migraphx::shape is{itype, b};
migraphx::shape ds{dtype, {2, 2, 2}};
std::vector<migraphx::shape::dynamic_dimension> ddout{{2, 3, 0}, {2, 2, 0}};
int batch_dims(1);
migraphx::shape s0{dtype, {ddout}};
expect_shape(s0, migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
TEST_CASE(test_gathernd_dynamic6a)
{
// indices with non-fixed dynamic dimension k
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
std::vector<migraphx::shape::dynamic_dimension> b{{2, 2, 0}, {1, 3, 0}};
migraphx::shape is{itype, b};
migraphx::shape ds{dtype, {2, 2, 2}};
int batch_dims(1);
throws_shape(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
TEST_CASE(test_gathernd_dynamic7)
{
// See Example 5 at https://github.com/onnx/onnx/blob/main/docs/Operators.md#GatherND
// index and data both dynamic shapes
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
std::vector<migraphx::shape::dynamic_dimension> idyn{{2, 5, 0}, {1, 1, 0}};
migraphx::shape is{itype, idyn};
std::vector<migraphx::shape::dynamic_dimension> bdyn{{1, 2, 0}, {1, 2, 0}, {1, 2, 0}};
migraphx::shape ds{dtype, bdyn};
std::vector<migraphx::shape::dynamic_dimension> ddout{{2, 5, 0}, {1, 2, 0}};
int batch_dims(1);
migraphx::shape s0{dtype, {ddout}};
expect_shape(s0, migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
TEST_CASE(test_gathernd_dynamic8)
{
// Same shapes as ref_ops_test gathernd_dynamic
// index static shape, data dynamic
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape is{itype, {2, 5, 1}};
std::vector<migraphx::shape::dynamic_dimension> b{{6, 7, 7}, {3, 3, 0}, {1, 4, 0}};
migraphx::shape ds{dtype, b};
std::vector<migraphx::shape::dynamic_dimension> ddout{{2, 2, 0}, {5, 5, 0}, {1, 4, 0}};
int batch_dims(1);
migraphx::shape s0{dtype, {ddout}};
expect_shape(s0, migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), ds, is);
}
TEST_CASE(test_scatternd0)
{
// good
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {8}};
migraphx::shape is{itype, {4, 1}};
migraphx::shape us{dtype, {4}};
expect_shape(ds, migraphx::make_op("scatternd_none"), ds, is, us);
}
TEST_CASE(test_scatternd1)
{
// good, broadcasted
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {8}};
migraphx::shape is{itype, {4, 1}, {4, 0}};
migraphx::shape us{dtype, {4}};
expect_shape(ds, migraphx::make_op("scatternd_none"), ds, is, us);
}
TEST_CASE(test_scatternd2)
{
// too many inputs
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {8}};
migraphx::shape is{itype, {4, 1}};
migraphx::shape us{dtype, {4}};
migraphx::shape zs{dtype, {4}};
throws_shape(migraphx::make_op("scatternd_none"), ds, is, us, zs);
}
TEST_CASE(test_scatternd3)
{
// q + r - k - 1 matches upd_lens.size(), but k > r
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {8}};
migraphx::shape is{itype, {5, 4, 2}};
migraphx::shape us{dtype, {4}};
throws_shape(migraphx::make_op("scatternd_none"), ds, is, us);
}
TEST_CASE(test_scatternd4)
{
// q + r - k - 1 != upd_lens.size()
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {8}};
migraphx::shape is{itype, {4, 1}};
migraphx::shape us{dtype, {2, 2}};
throws_shape(migraphx::make_op("scatternd_none"), ds, is, us);
}
TEST_CASE(test_scatternd5)
{
// dimensions don't match: update.lens != indices.lens[0:q-1]
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {8, 3}};
migraphx::shape is{itype, {4, 1}};
migraphx::shape us{dtype, {2, 2}};
throws_shape(migraphx::make_op("scatternd_none"), ds, is, us);
}
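// Editor's sketch, not from the original file: the valid and throwing cases above reflect the
// ScatterND requirement that, with q = indices rank, r = data rank and k = indices.lens().back(),
// the updates shape equals indices.lens[0 : q-1] followed by data.lens[k : r] (so it has rank
// q + r - k - 1). scatternd_expected_update_lens is a hypothetical helper for static shapes.
inline std::vector<std::size_t>
scatternd_expected_update_lens(const std::vector<std::size_t>& data_lens,
                               const std::vector<std::size_t>& index_lens)
{
    const std::size_t k = index_lens.back();
    std::vector<std::size_t> upd(index_lens.begin(), index_lens.end() - 1);
    upd.insert(upd.end(), data_lens.begin() + k, data_lens.end());
    return upd; // e.g. data {8} with indices {4, 1} expects updates {4}, as in test_scatternd0
}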
TEST_CASE(test_scatternd_dyn0)
{
// one dynamic input, invalid index
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {4}};
migraphx::shape is{itype, {4, 13}};
migraphx::shape::dynamic_dimension dd{4, 4, 0};
migraphx::shape us{dtype, {dd}};
throws_shape(migraphx::make_op("scatternd_none"), ds, is, us);
}
TEST_CASE(test_scatternd_dyn1)
{
// one dynamic input
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {8}};
migraphx::shape is{itype, {4, 1}};
migraphx::shape::dynamic_dimension dd{4, 4, 0};
migraphx::shape us{dtype, {dd}};
expect_shape(ds, migraphx::make_op("scatternd_none"), ds, is, us);
}
TEST_CASE(test_scatternd_dyn2)
{
// one dynamic input and broadcasted data
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {2, 3, 1, 4}, {0, 1, 1, 0}};
migraphx::shape ds_std{dtype, {2, 3, 1, 4}};
migraphx::shape is{itype, {4, 4}};
migraphx::shape::dynamic_dimension dd{4, 4, 0};
migraphx::shape us{dtype, {dd}};
expect_shape(ds_std, migraphx::make_op("scatternd_none"), ds, is, us);
}
TEST_CASE(test_scatternd_dyn3)
{
// one dynamic input and standard, static data
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {2, 3, 1, 4}};
migraphx::shape is{itype, {4, 4}};
migraphx::shape::dynamic_dimension dd{4, 4, 0};
migraphx::shape us{dtype, {dd}};
expect_shape(ds, migraphx::make_op("scatternd_none"), ds, is, us);
}
TEST_CASE(test_scatternd_dyn4)
{
// index is dynamic with last dimension not fixed
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {2, 3, 1, 4}};
migraphx::shape::dynamic_dimension dd{4, 5, 0};
migraphx::shape is{itype, {dd, dd}};
migraphx::shape us{dtype, {dd}};
throws_shape(migraphx::make_op("scatternd_none"), ds, is, us);
}
TEST_CASE(test_scatternd_dyn5)
{
// dimensions don't match: update.lens != indices.lens[0:q-1]
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape ds{dtype, {2, 3, 1, 4}};
migraphx::shape::dynamic_dimension dd{4, 4, 0};
migraphx::shape::dynamic_dimension dbad{2, 3, 0};
migraphx::shape is{itype, {dd, dd}};
migraphx::shape us{dtype, {dbad}};
throws_shape(migraphx::make_op("scatternd_none"), ds, is, us);
}
TEST_CASE(test_squeeze)
......@@ -2754,6 +3233,42 @@ TEST_CASE(where_broadcast_input)
expect_shape(s2, migraphx::make_op("where"), s3, s1, s2);
}
TEST_CASE(where_dyn_input0)
{
// dynamic shapes not the same
migraphx::shape s1{migraphx::shape::float_type, {{2, 3, 0}, {3, 3, 0}}};
migraphx::shape s2{migraphx::shape::float_type, {{2, 3, 0}, {2, 3, 0}}};
migraphx::shape s3{migraphx::shape::bool_type, {2, 2}};
throws_shape(migraphx::make_op("where"), s3, s1, s2);
}
TEST_CASE(where_dyn_input1)
{
// mixed static/dynamic inputs (not allowed)
migraphx::shape s1{migraphx::shape::float_type, {2, 2}, {2, 1}};
migraphx::shape s2{migraphx::shape::float_type, {{2, 2, 0}, {2, 2, 0}}};
migraphx::shape s3{migraphx::shape::bool_type, {2, 2}, {2, 1}};
throws_shape(migraphx::make_op("where"), s3, s1, s2);
}
TEST_CASE(where_dyn_input2)
{
// dynamic shapes
migraphx::shape s1{migraphx::shape::float_type, {{2, 3, 0}, {3, 3, 0}}};
migraphx::shape s2{migraphx::shape::float_type, {{2, 3, 0}, {3, 3, 0}}};
migraphx::shape s3{migraphx::shape::bool_type, {{2, 3, 0}, {3, 3, 0}}};
expect_shape(s2, migraphx::make_op("where"), s3, s1, s2);
}
TEST_CASE(where_dyn_input3)
{
// dynamic shapes, predicate shape is different
migraphx::shape s1{migraphx::shape::float_type, {{2, 3, 0}, {3, 3, 0}}};
migraphx::shape s2{migraphx::shape::float_type, {{2, 3, 0}, {3, 3, 0}}};
migraphx::shape s3{migraphx::shape::bool_type, {{2, 3, 0}, {3, 4, 0}}};
throws_shape(migraphx::make_op("where"), s3, s1, s2);
}
TEST_CASE(roialign_test)
{
migraphx::shape sx{migraphx::shape::float_type, {3, 4, 5, 6}};
......@@ -2776,4 +3291,52 @@ TEST_CASE(roialign_test)
throws_shape(migraphx::make_op("roialign"), sx, srois2, sbi);
}
TEST_CASE(test_concat)
{
migraphx::shape sx{migraphx::shape::float_type, {3, 4, 5, 6}};
migraphx::shape sy{migraphx::shape::float_type, {3, 4, 1, 6}};
migraphx::shape sout{migraphx::shape::float_type, {3, 4, 6, 6}};
expect_shape(sout, migraphx::make_op("concat", {{"axis", 2}}), sx, sy);
// axis out of range
throws_shape(migraphx::make_op("concat", {{"axis", 11}}), sx, sy);
// 1 input; no-op
expect_shape(sx, migraphx::make_op("concat", {{"axis", 2}}), sx);
// rank doesn't match
migraphx::shape sbi1{migraphx::shape::int64_type, {2, 3}};
throws_shape(migraphx::make_op("concat", {{"axis", 0}}), sx, sbi1);
// non-matching dimension 2
throws_shape(migraphx::make_op("concat", {{"axis", 1}}), sx, sy);
// no input shapes (at least one is required)
throws_shape(migraphx::make_op("concat", {{"axis", 0}}));
}
TEST_CASE(test_dyn_concat)
{
migraphx::shape sx{migraphx::shape::float_type, {{1, 3, 3}, {4, 4}, {1, 5, 5}, {6, 6}}};
migraphx::shape sy{migraphx::shape::float_type, {{1, 3, 3}, {4, 4}, {1, 4, 4}, {6, 6}}};
migraphx::shape sout{migraphx::shape::float_type, {{1, 3, 3}, {4, 4, 0}, {2, 9, 0}, {6, 6}}};
expect_shape(sout, migraphx::make_op("concat", {{"axis", 2}}), sx, sy);
// axis out of range
throws_shape(migraphx::make_op("concat", {{"axis", 4}}), sx, sy);
// rank doesn't match
migraphx::shape srank{migraphx::shape::int64_type, {{1, 3, 3}, {4, 4}}};
throws_shape(migraphx::make_op("concat", {{"axis", 0}}), sx, srank);
// non-matching dimension 2
throws_shape(migraphx::make_op("concat", {{"axis", 1}}), sx, sy);
// static and dynamic shapes together
migraphx::shape sstat{migraphx::shape::float_type, {3, 4, 1, 6}};
throws_shape(migraphx::make_op("concat", {{"axis", 2}}), sx, sstat);
}
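// Editor's sketch, not from the original file: in the dynamic concat case above the non-concat
// dimensions have to match, while the concat axis adds up the inputs' minimums and maximums
// separately ({1, 5} and {1, 4} on axis 2 give {2, 9}). concat_axis_range is a hypothetical
// helper showing that, assuming each dynamic dimension is written as a {min, max} pair.
inline std::pair<std::size_t, std::size_t>
concat_axis_range(const std::vector<std::pair<std::size_t, std::size_t>>& axis_dims)
{
    std::size_t lo = 0;
    std::size_t hi = 0;
    for(const auto& d : axis_dims)
    {
        lo += d.first;  // sum of the per-input minimums
        hi += d.second; // sum of the per-input maximums
    }
    return {lo, hi};
}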
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -51,7 +51,7 @@ class MIGraphXBackendTest(onnx.backend.test.BackendTest):
np.testing.assert_equal(ref_outputs[i].dtype,
outputs[i].dtype,
err_msg=prog_string)
if ref_outputs[i].dtype == object:
np.testing.assert_array_equal(ref_outputs[i],
outputs[i],
err_msg=prog_string)
......
......@@ -33,7 +33,8 @@ def test_conv_relu():
p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
print(p)
print("Compiling ...")
# set offload_copy, fast_match and exhaustive_tune to true
p.compile(migraphx.get_target("gpu"), True, True, True)
print(p)
params = {}
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -953,6 +953,41 @@ TEST_CASE(concat_test)
}
}
TEST_CASE(concat_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
int axis = 0;
migraphx::shape s0{migraphx::shape::int32_type, {{2, 4, 2}, {2, 3, 2}}};
migraphx::shape s1{migraphx::shape::int32_type, {{3, 4, 4}, {2, 3, 2}}};
migraphx::shape s2{migraphx::shape::int32_type, {{1, 5, 3}, {2, 3, 2}}};
auto input0 = mm->add_parameter("X", s0);
auto input1 = mm->add_parameter("Y", s1);
auto input2 = mm->add_parameter("Z", s2);
mm->add_instruction(migraphx::make_op("concat", {{"axis", axis}}), input0, input1, input2);
p.compile(migraphx::ref::target{});
migraphx::shape static_shape0{migraphx::shape::int32_type, {2, 2}};
migraphx::shape static_shape1{migraphx::shape::int32_type, {3, 2}};
migraphx::shape static_shape2{migraphx::shape::int32_type, {1, 2}};
std::vector<int> data0 = {0, 1, 2, 3};
std::vector<int> data1 = {4, 5, 6, 7, 8, 9};
std::vector<int> data2 = {10, 11};
migraphx::parameter_map params;
params["X"] = migraphx::argument(static_shape0, data0.data());
params["Y"] = migraphx::argument(static_shape1, data1.data());
params["Z"] = migraphx::argument(static_shape2, data2.data());
auto result = p.eval(params).back();
std::vector<int> results_vector(12);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
EXPECT(migraphx::verify_range(results_vector, gold));
EXPECT(migraphx::verify_range(result.get_shape().lens(), std::vector<std::size_t>({6, 2})));
}
TEST_CASE(contiguous_test)
{
migraphx::shape a_shape{migraphx::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
......@@ -2524,6 +2559,78 @@ TEST_CASE(gather_test)
}
}
TEST_CASE(gather_dyn_test0)
{
// Dynamic data, static indices
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::int32_type, {{2, 5, 0}, {3, 3, 0}}};
auto x = mm->add_parameter("x", s);
std::vector<int> indices{1, 2};
migraphx::shape s_ind{migraphx::shape::int32_type, {1, 2}};
auto ind = mm->add_parameter("indices", s_ind);
mm->add_instruction(migraphx::make_op("gather", {{"axis", 1}}), x, ind);
migraphx::shape sresult{migraphx::shape::int32_type, {{2, 5, 0}, {1, 1, 0}, {2, 2, 0}}};
EXPECT(p.get_output_shapes().back() == sresult);
p.compile(migraphx::ref::target{});
migraphx::shape input_fixed_shape{migraphx::shape::int32_type, {2, 3}};
migraphx::shape input_indices{migraphx::shape::int32_type, {1, 2}};
migraphx::parameter_map params;
std::vector<int> data(2 * 3);
std::iota(data.begin(), data.end(), 0);
params["x"] = migraphx::argument(input_fixed_shape, data.data());
params["indices"] = migraphx::argument(input_indices, indices.data());
auto result = p.eval(params).back();
std::vector<int> gold = {1, 2, 4, 5};
std::vector<int> results_vector(2 * 1 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, gold));
migraphx::shape sfinal{migraphx::shape::int32_type, {2, 1, 2}};
EXPECT(result.get_shape() == sfinal);
}
TEST_CASE(gather_dyn_test1)
{
// Dynamic data, dynamic indices
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::int32_type, {{2, 5, 0}, {4, 4, 0}}};
auto x = mm->add_parameter("x", s);
migraphx::shape s_ind{migraphx::shape::int32_type, {{1, 8, 7}, {2, 3, 3}}};
auto ind = mm->add_parameter("indices", s_ind);
mm->add_instruction(migraphx::make_op("gather", {{"axis", 0}}), x, ind);
migraphx::shape sresult{migraphx::shape::int32_type, {{1, 8, 7}, {2, 3, 3}, {4, 4, 0}}};
EXPECT(p.get_output_shapes().back() == sresult);
p.compile(migraphx::ref::target{});
migraphx::shape input_fixed_shape{migraphx::shape::int32_type, {3, 4}};
migraphx::shape input_indices_shape{migraphx::shape::int32_type, {1, 2}};
std::vector<int> indices{2, 0};
migraphx::parameter_map params;
std::vector<int> data(3 * 4);
std::iota(data.begin(), data.end(), 0);
params["x"] = migraphx::argument(input_fixed_shape, data.data());
params["indices"] = migraphx::argument(input_indices_shape, indices.data());
auto result = p.eval(params).back();
std::vector<int> gold = {8, 9, 10, 11, 0, 1, 2, 3};
std::vector<int> results_vector(1 * 2 * 4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, gold));
migraphx::shape sfinal{migraphx::shape::int32_type, {1, 2, 4}};
EXPECT(result.get_shape() == sfinal);
}
TEST_CASE(gathernd_test)
{
{
......@@ -2674,6 +2781,187 @@ TEST_CASE(gathernd_test)
}
}
TEST_CASE(gathernd_dynamic0)
{
// dynamic data, all dimensions fixed
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {{2, 2, 2}, {3, 3, 0}, {1, 1, 0}}};
migraphx::shape is{migraphx::shape::int64_type, {2, 2, 1}};
auto xdata = mm->add_parameter("X", ds);
auto xindex = mm->add_parameter("I", is);
auto gathernd_op = migraphx::make_op("gathernd");
auto gathernd = mm->add_instruction(gathernd_op, xdata, xindex);
mm->add_return({gathernd});
p.compile(migraphx::ref::target{});
migraphx::parameter_map params;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 3, 1}}; // data
migraphx::shape input_fixed_shape1{migraphx::shape::int64_type, {2, 2, 1}}; // index
std::vector<float> data_vec(2 * 3 * 1);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{1, 0, 0, 1};
params["X"] = migraphx::argument(input_fixed_shape0, data_vec.data());
params["I"] = migraphx::argument(input_fixed_shape1, indices_vec.data());
auto result = p.eval(params).back();
std::vector<float> res_data{};
std::vector<float> gold{3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
TEST_CASE(gathernd_dynamic1)
{
// dynamic data, dims not fixed
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {{2, 5, 2}, {1, 5, 0}, {1, 5, 0}}};
migraphx::shape is{migraphx::shape::int64_type, {2, 2, 1}};
auto xdata = mm->add_parameter("X", ds);
auto xindex = mm->add_parameter("I", is);
auto gathernd_op = migraphx::make_op("gathernd");
auto gathernd = mm->add_instruction(gathernd_op, xdata, xindex);
mm->add_return({gathernd});
p.compile(migraphx::ref::target{});
migraphx::parameter_map params;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 3, 1}}; // data
migraphx::shape input_fixed_shape1{migraphx::shape::int64_type, {2, 2, 1}}; // index
std::vector<float> data_vec(2 * 3 * 1);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{1, 0, 0, 1};
params["X"] = migraphx::argument(input_fixed_shape0, data_vec.data());
params["I"] = migraphx::argument(input_fixed_shape1, indices_vec.data());
auto result = p.eval(params).back();
std::vector<float> res_data{};
std::vector<float> gold{3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
TEST_CASE(gathernd_dynamic2)
{
// both index and data are dynamic
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {{2, 5, 2}, {1, 5, 0}, {1, 5, 0}}};
migraphx::shape is{migraphx::shape::int64_type, {{2, 5, 3}, {2, 3, 3}, {1, 1}}};
auto xdata = mm->add_parameter("X", ds);
auto xindex = mm->add_parameter("I", is);
auto gathernd_op = migraphx::make_op("gathernd");
auto gathernd = mm->add_instruction(gathernd_op, xdata, xindex);
mm->add_return({gathernd});
p.compile(migraphx::ref::target{});
migraphx::parameter_map params;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 3, 1}}; // data
migraphx::shape input_fixed_shape1{migraphx::shape::int64_type, {2, 2, 1}}; // index
std::vector<float> data_vec(2 * 3 * 1);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{1, 0, 0, 1};
params["X"] = migraphx::argument(input_fixed_shape0, data_vec.data());
params["I"] = migraphx::argument(input_fixed_shape1, indices_vec.data());
auto result = p.eval(params).back();
std::vector<float> res_data{};
std::vector<float> gold{3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
TEST_CASE(gathernd_dynamic3)
{
// dynamic index, static data and a batch_dims input
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 1}};
migraphx::shape is{migraphx::shape::int64_type, {{2, 5, 3}, {2, 3, 3}, {1, 1}}};
auto xdata = mm->add_parameter("X", ds);
auto xindex = mm->add_parameter("I", is);
int batch_dims{1};
auto gathernd_op = migraphx::make_op("gathernd", {{"batch_dims", batch_dims}});
auto gathernd = mm->add_instruction(gathernd_op, xdata, xindex);
mm->add_return({gathernd});
p.compile(migraphx::ref::target{});
migraphx::parameter_map params;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 3, 1}}; // data
migraphx::shape input_fixed_shape1{migraphx::shape::int64_type, {2, 2, 1}}; // index
std::vector<float> data_vec(2 * 3 * 1);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{1, 0, 0, 1};
params["X"] = migraphx::argument(input_fixed_shape0, data_vec.data());
params["I"] = migraphx::argument(input_fixed_shape1, indices_vec.data());
auto result = p.eval(params).back();
std::vector<float> res_data{};
std::vector<float> gold{1, 0, 3, 4};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
TEST_CASE(gathernd_dynamic4)
{
// int(q) + r - k - batch_dims - 1 = 0 => returns a scalar
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type,
{migraphx::shape::dynamic_dimension({2, 2, 0})}};
migraphx::shape is{migraphx::shape::int64_type, {1}};
auto xdata = mm->add_parameter("X", ds);
auto xindex = mm->add_parameter("I", is);
auto gathernd_op = migraphx::make_op("gathernd");
auto gathernd = mm->add_instruction(gathernd_op, xdata, xindex);
mm->add_return({gathernd});
p.compile(migraphx::ref::target{});
migraphx::parameter_map params;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2}}; // data
migraphx::shape input_fixed_shape1{migraphx::shape::int64_type, {1}}; // index
std::vector<float> data_vec(2);
std::iota(data_vec.begin(), data_vec.end(), 4);
std::vector<int64_t> indices_vec{1};
params["X"] = migraphx::argument(input_fixed_shape0, data_vec.data());
params["I"] = migraphx::argument(input_fixed_shape1, indices_vec.data());
auto result = p.eval(params).back();
std::vector<float> res_data{};
std::vector<float> gold{5};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
TEST_CASE(gathernd_negative_index_test)
{
{
......@@ -6989,6 +7277,269 @@ TEST_CASE(scatternd_reduction_test)
}
}
TEST_CASE(select_module_add_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
auto literal_ins = mm->add_literal(migraphx::literal{lit_s, {6}});
// create batch submodules
auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
auto* submod = p.create_module(module_name);
migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
auto sm_input = submod->add_parameter("data", sm_shape);
auto broadcast_lit =
submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
auto add_ins = submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
submod->add_return({add_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
auto input = mm->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
p.compile(migraphx::ref::target{});
std::vector<float> input_data{-4, 8, -1, 4, -1, 8, 8, -4};
migraphx::parameter_map params;
migraphx::shape input_fixed_shape{migraphx::shape::float_type, {2, 4}};
params["data"] = migraphx::argument(input_fixed_shape, input_data.data());
auto result = p.eval(params).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{2, 14, 5, 10, 5, 14, 14, 2};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(select_module_reduce_test0)
{
migraphx::program p;
// create batch submodules
auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
auto* submod = p.create_module(module_name);
migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 2, 2}};
auto sm_input = submod->add_parameter("data", sm_shape);
auto reduce_ins =
submod->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
auto squeeze_ins =
submod->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), reduce_ins);
submod->add_return({squeeze_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
auto input = mm->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
p.compile(migraphx::ref::target{});
std::vector<float> input_data{-4, 8, -1, 4, -1, 8, 8, -4};
migraphx::parameter_map params;
migraphx::shape input_fixed_shape{migraphx::shape::float_type, {2, 2, 2}};
params["data"] = migraphx::argument(input_fixed_shape, input_data.data());
auto result = p.eval(params).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{-5, 12, 7, 4};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(select_module_reduce_test1)
{
migraphx::program p;
// create batch submodules
auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
auto* submod = p.create_module(module_name);
migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 2, 2}};
auto sm_input = submod->add_parameter("data", sm_shape);
auto reduce_ins =
submod->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
auto squeeze_ins =
submod->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), reduce_ins);
submod->add_return({squeeze_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
auto input = mm->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
p.compile(migraphx::ref::target{});
std::vector<float> input_data{-4, 8, -1, 4, -1, 8, 8, -4, -4, 8, -1, 4, -1, 8, 8, -4};
migraphx::parameter_map params;
migraphx::shape input_fixed_shape{migraphx::shape::float_type, {4, 2, 2}};
params["data"] = migraphx::argument(input_fixed_shape, input_data.data());
auto result = p.eval(params).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{-5, 12, 7, 4, -5, 12, 7, 4};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(scatternd_reduction_dyn_test)
{
// reduction = add, with dynamic input shapes
migraphx::program p;
auto* mm = p.get_main_module();
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape::dynamic_dimension dd{3, 6, 0};
migraphx::shape ds{migraphx::shape::float_type, {dd, dd, dd}};
migraphx::shape is{itype, {2, 1}};
migraphx::shape us{dtype, {{2, 2, 0}, dd, dd}};
auto xdata = mm->add_parameter("X", ds);
auto xindex = mm->add_parameter("I", is);
auto xupdates = mm->add_parameter("U", us);
auto scatternd_add_op = migraphx::make_op("scatternd_add");
auto scatternd = mm->add_instruction(scatternd_add_op, xdata, xindex, xupdates);
mm->add_return({scatternd});
p.compile(migraphx::ref::target{});
migraphx::parameter_map params;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {4, 4, 4}}; // data
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6,
7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4,
5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8};
std::vector<uint64_t> input_index{0, 2};
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {2, 4, 4}}; // updates
std::vector<float> input_updates{5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4};
params["X"] = migraphx::argument(input_fixed_shape0, input_data.data());
params["I"] = migraphx::argument(is, input_index.data());
params["U"] = migraphx::argument(input_fixed_shape1, input_updates.data());
auto result = p.eval(params).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{6, 7, 8, 9, 11, 12, 13, 14, 15, 14, 13, 12, 12, 11, 10, 9,
1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1,
9, 8, 7, 6, 6, 5, 4, 3, 4, 5, 6, 7, 9, 10, 11, 12,
8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(sigmoid_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = mm->add_literal(migraphx::literal{s, {-1, 2, -3, 4}});
mm->add_instruction(migraphx::make_op("sigmoid"), l);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sigmoid(-1), sigmoid(2), sigmoid(-3), sigmoid(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(sigmoid_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{2, 4, 0}, {2, 2, 0}}};
auto input = mm->add_parameter("X", s);
mm->add_instruction(migraphx::make_op("sigmoid"), input);
p.compile(migraphx::ref::target{});
std::vector<float> input_data{-1, 2, -3, 4};
migraphx::parameter_map params0;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 2}};
params0["X"] = migraphx::argument(input_fixed_shape0, input_data.data());
auto result = p.eval(params0).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sigmoid(-1), sigmoid(2), sigmoid(-3), sigmoid(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
......@@ -7188,6 +7739,69 @@ TEST_CASE(slice_test)
}
}
TEST_CASE(slice_dyn_test0)
{
// Slice a single dynamic dimension. ax1 slice limits are smaller than min; ax2 "ends" is
// too large
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::int32_type, {{2, 3, 0}, {2, 2, 0}, {3, 3, 0}}};
auto x = mm->add_parameter("x", s);
mm->add_instruction(
migraphx::make_op("slice", {{"axes", {1, 2}}, {"starts", {0, 1}}, {"ends", {1, 6}}}), x);
migraphx::shape s2{migraphx::shape::int32_type, {{2, 3, 0}, {1, 1, 0}, {2, 2, 0}}};
EXPECT(p.get_output_shapes().back() == s2);
p.compile(migraphx::ref::target{});
// the strides of sresult are those of the original shape, not
// reduced to sliced size.
migraphx::shape sresult{migraphx::shape::int32_type, {2, 1, 2}, {6, 3, 1}};
migraphx::shape input_fixed_shape{migraphx::shape::int32_type, {2, 2, 3}};
migraphx::parameter_map params;
std::vector<int> data(2 * 2 * 3);
std::iota(data.begin(), data.end(), 0);
params["x"] = migraphx::argument(input_fixed_shape, data.data());
auto result = p.eval(params).back();
std::vector<int> gold = {1, 2, 7, 8};
std::vector<int> results_vector(2 * 1 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, gold));
EXPECT(result.get_shape() == sresult);
}
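// Editor's note, not from the original file: as the comment above says, the sliced result is a
// view into the input buffer that keeps the input strides {6, 3, 1} and starts at the offset of
// element (0, 0, 1). sliced_flat_index is a hypothetical helper spelling out that index math
// for this particular test.
inline std::size_t sliced_flat_index(std::size_t i, std::size_t j, std::size_t k)
{
    const std::size_t offset = 1;          // starts {0, 1} on axes {1, 2}
    return offset + i * 6 + j * 3 + k * 1; // original strides, not recomputed for the slice
}
// so the gold {1, 2, 7, 8} is elements 1, 2, 7 and 8 of the iota-filled input.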
TEST_CASE(slice_dyn_test1)
{
// Slice all three dynamic dimensions
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::int32_type, {{2, 2, 0}, {2, 2, 0}, {3, 3, 0}}};
auto x = mm->add_parameter("x", s);
mm->add_instruction(
migraphx::make_op("slice",
{{"axes", {0, 1, 2}}, {"starts", {0, 0, 0}}, {"ends", {2, 2, 2}}}),
x);
migraphx::shape s2{migraphx::shape::int32_type, {{2, 2, 0}, {2, 2, 0}, {2, 2, 0}}};
EXPECT(p.get_output_shapes().back() == s2);
p.compile(migraphx::ref::target{});
migraphx::shape sresult{migraphx::shape::int32_type, {2, 2, 2}, {6, 3, 1}};
migraphx::shape input_fixed_shape{migraphx::shape::int32_type, {2, 2, 3}};
migraphx::parameter_map params;
std::vector<int> data(2 * 2 * 3);
std::iota(data.begin(), data.end(), 0);
params["x"] = migraphx::argument(input_fixed_shape, data.data());
auto result = p.eval(params).back();
std::vector<int> gold = {0, 1, 3, 4, 6, 7, 9, 10};
std::vector<int> results_vector(2 * 2 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, gold));
EXPECT(result.get_shape() == sresult);
}
TEST_CASE(softmax_simple_test)
{
migraphx::program p;
......@@ -7812,6 +8426,37 @@ TEST_CASE(where_test)
EXPECT(migraphx::verify_range(result_vec, gold));
}
TEST_CASE(where_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::bool_type, {{2, 3, 0}, {2, 3, 0}}};
migraphx::shape sx{migraphx::shape::float_type, {{2, 3, 0}, {2, 3, 0}}};
auto lb = mm->add_parameter("predicate", sb);
auto lx = mm->add_parameter("X", sx);
auto ly = mm->add_parameter("Y", sx);
mm->add_instruction(migraphx::make_op("where"), lb, lx, ly);
p.compile(migraphx::ref::target{});
std::vector<char> b{1, 1, 1, 0, 0, 0, 1, 0, 1};
std::vector<float> x(9, 1.0);
std::vector<float> y(9, 2.0);
migraphx::parameter_map params;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {3, 3}};
migraphx::shape input_fixed_shape1{migraphx::shape::uint8_type, {3, 3}};
params["X"] = migraphx::argument(input_fixed_shape0, x.data());
params["Y"] = migraphx::argument(input_fixed_shape0, y.data());
params["predicate"] = migraphx::argument(input_fixed_shape1, b.data());
auto result = p.eval(params).back();
std::vector<float> results_vector(3 * 3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 1, 1, 2, 2, 2, 1, 2, 1};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(where_broadcasted_inputs_test)
{
migraphx::program p;
......
......@@ -61,6 +61,8 @@ struct reflectable_type
}
};
std::vector<nested_type> nested_types = {};
std::tuple<int, nested_type, std::string> tuple_items = std::make_tuple(0, nested_type{0}, "");
migraphx::optional<int> opt_value = migraphx::nullopt;
template <class Self, class F>
static auto reflect(Self& self, F f)
......@@ -71,7 +73,8 @@ struct reflectable_type
f(self.et, "et"),
f(self.se, "se"),
f(self.ce, "ce"),
f(self.nested_types, "nested_types"));
f(self.nested_types, "nested_types"),
f(self.tuple_items, "tuple_items"));
}
};
......@@ -83,7 +86,9 @@ TEST_CASE(serialize_reflectable_type)
{},
reflectable_type::simple1,
reflectable_type::class_enum::class2,
{{1}, {2}}};
{{1}, {2}},
{5, {4}, "hello"},
{migraphx::nullopt}};
migraphx::value v1 = migraphx::to_value(t1);
reflectable_type t2 = migraphx::from_value<reflectable_type>(v1);
migraphx::value v2 = migraphx::to_value(t2);
......@@ -125,6 +130,21 @@ TEST_CASE(serialize_empty_struct)
EXPECT(v.at("a").to<int>() == 1);
}
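// optional<T> is expected to serialize to a null value when disengaged and to the contained value otherwise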
TEST_CASE(serialize_empty_optional)
{
migraphx::optional<int> x{};
migraphx::value v = migraphx::to_value(x);
EXPECT(v.is_null());
}
TEST_CASE(serialize_optional)
{
migraphx::optional<int> x{2};
migraphx::value v = migraphx::to_value(x);
EXPECT(v.is_int64());
EXPECT(v.to<int>() == 2);
}
TEST_CASE(from_value_binary)
{
std::vector<std::uint8_t> data(10);
......
......@@ -238,6 +238,30 @@ TEST_CASE(test_shape_dynamic_serialize)
EXPECT(s3 != s4);
}
TEST_CASE(any_of_dynamic_true)
{
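// a tuple shape counts as dynamic if any sub-shape is dynamic, even when min == max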
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
migraphx::shape s0{sub_shapes};
EXPECT(s0.any_of_dynamic());
sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 1}, {4, 4}}});
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
migraphx::shape s1{sub_shapes};
EXPECT(s1.any_of_dynamic());
}
TEST_CASE(any_of_dynamic_false)
{
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {1, 4}});
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
migraphx::shape s{sub_shapes};
EXPECT(not s.any_of_dynamic());
}
TEST_CASE(test_shape_packed)
{
migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}};
......
......@@ -559,6 +559,32 @@ TEST_CASE(simplify_inner_broadcast2)
EXPECT(m1 == m2);
}
TEST_CASE(simplify_inner_broadcast_scalar)
{
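// the scalar operand only needs broadcasting to {1, 384}; the full {32, 384} multibroadcast is moved after the add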
auto b = migraphx::op::multibroadcast{{32, 384}};
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1, 384}});
auto y = m1.add_parameter("y", {migraphx::shape::int32_type, {1, 1}});
auto xb = m1.add_instruction(b, x);
auto yb = m1.add_instruction(b, y);
auto sum = m1.add_instruction(migraphx::make_op("add"), xb, yb);
m1.add_instruction(pass_op{}, sum);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1, 384}});
auto y = m2.add_parameter("y", {migraphx::shape::int32_type, {1, 1}});
auto yb = m2.add_instruction(migraphx::op::multibroadcast{{1, 384}}, y);
auto sum = m2.add_instruction(migraphx::make_op("add"), x, yb);
auto sumb = m2.add_instruction(b, sum);
m2.add_instruction(pass_op{}, sumb);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_add_conv1)
{
migraphx::module m;
......@@ -1041,16 +1067,18 @@ TEST_CASE(simplify_neg_unit_mult_const)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto unit = m1.add_literal(-1);
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1, 6}});
auto unit = m1.add_literal(
migraphx::literal{{migraphx::shape::int32_type, {1, 6}}, std::vector<int>(6, -1)});
m1.add_instruction(migraphx::make_op("mul"), x, unit);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x);
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1, 6}});
auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
}
EXPECT((m1 == m2));
......@@ -1069,7 +1097,29 @@ TEST_CASE(simplify_neg_unit_mult_const2)
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x);
auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
}
EXPECT((m1 == m2));
}
TEST_CASE(simplify_neg_unit_mult_const_add)
{
migraphx::module m1;
{
auto unit = m1.add_literal(-1);
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto x2 = m1.add_instruction(migraphx::make_op("mul"), unit, x);
m1.add_instruction(migraphx::make_op("add"), x2, x2);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("add"), x2, x2);
}
EXPECT((m1 == m2));
......@@ -1092,7 +1142,8 @@ TEST_CASE(simplify_neg_unit_mul_const_vec)
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x);
auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
}
EXPECT(m1 == m2);
......@@ -1115,7 +1166,8 @@ TEST_CASE(simplify_neg_unit_mul_const_vec2)
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x);
auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
}
EXPECT(m1 == m2);
......@@ -1134,7 +1186,8 @@ TEST_CASE(simplify_neg_unit_div_const)
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x);
auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
}
EXPECT(m1 == m2);
......@@ -1157,7 +1210,8 @@ TEST_CASE(simplify_neg_unit_div_const_vec)
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x);
auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
}
EXPECT(m1 == m2);
......@@ -1217,7 +1271,8 @@ TEST_CASE(simplify_sub_neg_zero_const)
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x);
auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
}
EXPECT(m1 == m2);
}
......@@ -1239,7 +1294,8 @@ TEST_CASE(simplify_sub_neg_zero_const_vec)
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x);
auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
m2.add_instruction(migraphx::make_op("identity"), x2);
}
EXPECT(m1 == m2);
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/split_single_dyn_dim.hpp>
#include <migraphx/program.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/serialize.hpp>
#include <test.hpp>
void run_pass(migraphx::program& p)
{
migraphx::run_passes(p, {migraphx::split_single_dyn_dim{}, migraphx::dead_code_elimination{}});
}
TEST_CASE(dynamic_batch)
{
// slightly different from ref_ops_test in that the literal is duplicated into each submodule;
// a later compiler pass will pull the literals from the submodules into the main module
migraphx::program p0;
{
auto* mm0 = p0.get_main_module();
// create batch submodules
auto create_submodule = [&](std::size_t batch_size, std::string module_name) {
auto* submod = p0.create_module(module_name);
migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
auto sm_input = submod->add_parameter("data", sm_shape);
migraphx::shape lit_s{migraphx::shape::float_type, {1}};
auto literal_ins = submod->add_literal(migraphx::literal{lit_s, {6}});
auto broadcast_lit =
submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
auto add_ins =
submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
submod->add_return({add_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
auto input0 = mm0->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm0->add_instruction(
migraphx::make_op("select_module",
{{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input0},
{batch1, batch2, batch3, batch4});
mm0->add_return({sm_ins});
}
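// p1 is the equivalent program with a single dynamic batch dimension;
// run_pass should rewrite it into the select_module form built above in p0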
migraphx::program p1;
{
auto* mm1 = p1.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
auto input1 = mm1->add_parameter("data", s);
migraphx::shape lit_s{migraphx::shape::float_type, {1}};
auto literal_ins = mm1->add_literal(migraphx::literal{lit_s, {6}});
auto broadcast_lit =
mm1->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, input1);
auto add_ins = mm1->add_instruction(migraphx::make_op("add"), input1, broadcast_lit);
mm1->add_return({add_ins});
}
run_pass(p1);
EXPECT(p0 == p1);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -184,9 +184,18 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
std::vector<std::pair<std::string, result_future>> results;
migraphx::parameter_map m;
for(auto&& x : p.get_parameter_shapes())
{
if(x.second.dynamic())
{
// create static shape using maximum dimensions
migraphx::shape static_shape{x.second.type(), x.second.max_lens()};
m[x.first] = migraphx::generate_argument(static_shape, get_hash(x.first));
}
else
{
m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first));
}
}
auto gold_f = detach_async([=] { return run_ref(p, m); });
for(const auto& tname : target_names)
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_reduce_mean_large_half : verify_program<test_reduce_mean_large_half>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::half_type, {1, 32, 65536}};
auto x = mm->add_parameter("x", s);
mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2}}}), x);
return p;
};
};
......@@ -76,3 +76,16 @@ struct test_reduce_mean_2 : verify_program<test_reduce_mean_2>
return p;
};
};
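// reduce_mean over one very large axis (256 * 256 * 16 = 1048576 elements per row)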
struct test_large_reduce_mean : verify_program<test_large_reduce_mean>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 256 * 256 * 16}};
auto x = mm->add_parameter("x", s);
mm->add_instruction(migraphx::op::reduce_mean{{1}}, x);
return p;
};
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_select_module_add : verify_program<test_select_module_add>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape lit_s{migraphx::shape::float_type, {1}};
auto literal_ins = mm->add_literal(migraphx::literal{lit_s, {6}});
// create batch submodules
auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
auto* submod = p.create_module(module_name);
migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
auto sm_input = submod->add_parameter("data", sm_shape);
auto broadcast_lit =
submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
auto add_ins =
submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
submod->add_return({add_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
auto input = mm->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module",
{{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
return p;
}
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_select_module_reduce : verify_program<test_select_module_reduce>
{
migraphx::program create_program() const
{
migraphx::program p;
// create batch submodules
auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
auto* submod = p.create_module(module_name);
migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 2, 2}};
auto sm_input = submod->add_parameter("data", sm_shape);
auto reduce_ins =
submod->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
auto squeeze_ins =
submod->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), reduce_ins);
submod->add_return({squeeze_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
auto input = mm->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module",
{{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
return p;
}
};
......@@ -25,6 +25,7 @@ import argparse
import numpy as np
import migraphx
import onnxruntime as ort
import sys
def parse_args():
......@@ -33,12 +34,10 @@ def parse_args():
'MIGraphX accuracy checker. Use to verify onnx files to ensure MIGraphX\'s output \
is within tolerance of onnx runtime\'s expected output.'
)
req_args = parser.add_argument_group(title='required arguments')
req_args.add_argument('--onnx',
type=str,
required=True,
help='path to onnx file')
req_args.add_argument('--provider',
file_args = parser.add_argument_group(title='file type arguments')
file_args.add_argument('--onnx', type=str, help='path to onnx file')
file_args.add_argument('--tf', type=str, help='path to tf pb file')
parser.add_argument('--provider',
type=str,
default='CPUExecutionProvider',
help='execution provider for onnx runtime \
......@@ -50,6 +49,9 @@ def parse_args():
parser.add_argument('--fill1',
action='store_true',
help='fill all arguments with a value of 1')
parser.add_argument('--fill0',
action='store_true',
help='fill all arguments with a value of 0')
parser.add_argument('--verbose',
action='store_true',
help='show verbose information (for debugging)')
......@@ -57,6 +59,12 @@ def parse_args():
type=float,
default=1e-3,
help='accuracy tolerance (default = 1e-3)')
parser.add_argument('--input-dim',
type=str,
action='append',
help='specify input parameter dimensions \
with the following format: --input-dim input_name:dim0,dim1,dim2...'
)
args = parser.parse_args()
return args
......@@ -111,42 +119,127 @@ def get_np_datatype(in_type):
def main():
args = parse_args()
use_onnx = True
if args.onnx is None:
use_onnx = False
if not use_onnx and args.tf is None:
print('Error: please specify either an onnx or tf pb file')
sys.exit(-1)
model_name = args.onnx
batch = args.batch
custom_inputs = args.input_dim
input_dims = {}
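# each --input-dim entry looks like 'name:dim0,dim1,...', e.g. 'data:1,3,224,224' -> {'data': [1, 3, 224, 224]}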
if custom_inputs is not None:
for spec in custom_inputs:
input_name = ':'.join(spec.split(':')[:-1])
dims = [int(dim) for dim in spec.split(':')[-1].split(',')]
input_dims[input_name] = dims
if use_onnx:
if not input_dims:
model = migraphx.parse_onnx(model_name, default_dim_value=batch)
else:
model = migraphx.parse_onnx(model_name,
default_dim_value=batch,
map_input_dims=input_dims)
else:
model_name = args.tf
if not input_dims:
model = migraphx.parse_tf(model_name, batch_size=batch)
else:
model = migraphx.parse_tf(model_name,
batch_size=batch,
map_input_dims=input_dims)
if args.verbose:
print(model)
model.compile(migraphx.get_target('gpu'), offload_copy=False)
model.compile(migraphx.get_target('gpu'))
params = {}
test_inputs = {}
for name, shape in model.get_parameter_shapes().items():
if args.verbose:
print('Parameter {} -> {}'.format(name, shape))
print(f'Parameter {name} -> {shape}')
in_shape = shape.lens()
in_type = shape.type_string()
if not args.fill1:
if not args.fill1 and not args.fill0:
test_input = np.random.rand(*(in_shape)).astype(
get_np_datatype(in_type))
else:
elif not args.fill0:
test_input = np.ones(in_shape).astype(get_np_datatype(in_type))
else:
test_input = np.zeros(in_shape).astype(get_np_datatype(in_type))
test_inputs[name] = test_input
params[name] = migraphx.to_gpu(migraphx.argument(test_input))
params[name] = migraphx.argument(test_input)
pred_migx = np.array(migraphx.from_gpu(model.run(params)[-1]))
pred_migx = np.array(model.run(params)[-1])
if use_onnx:
sess = ort.InferenceSession(model_name, providers=[args.provider])
ort_params = {}
for input in sess.get_inputs():
ort_params[input.name] = test_inputs[input.name]
pred_ort = sess.run(None, ort_params)[-1]
try:
pred_fw = sess.run(None, ort_params)[-1]
except Exception as e:
if input_dims:
print(
'Error: custom input dim may not be compatible with onnx runtime'
)
raise e
else:
import tensorflow as tf
def load_tf_graph(model_name):
with tf.io.gfile.GFile(model_name, 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
with tf.compat.v1.Graph().as_default() as graph:
tf.graph_util.import_graph_def(graph_def)
return graph
graph = load_tf_graph(model_name)
is_nhwc = False
graph_ops = []
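# detect NHWC layout by checking the data_format attribute of any convolution op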
for op in graph.get_operations():
graph_ops.append(op.name)
if 'Conv' in op.node_def.op:
if 'NHWC' in op.get_attr('data_format').decode('utf-8'):
is_nhwc = True
graph_ops_set = set(graph_ops)
tf_dict = {}
for name in test_inputs.keys():
# import_graph_def prefixes op names with 'import/'
tf_name = f'import/{name}'
if tf_name not in graph_ops_set:
continue
x = graph.get_tensor_by_name(f'{tf_name}:0')
tf_input = test_inputs[name]
# transpose input for NHWC model
if tf_input.ndim == 4 and is_nhwc:
tf_dict[x] = np.transpose(tf_input, (0, 2, 3, 1))
else:
tf_dict[x] = tf_input
# assume last node in graph is output
# TODO: let user specify op name for output
y = graph.get_tensor_by_name(f'{graph_ops[-1]}:0')
with tf.compat.v1.Session(graph=graph) as sess:
y_out = sess.run(y, feed_dict=tf_dict)
pred_fw = y_out
is_correct = check_correctness(pred_ort, pred_migx, args.tolerance,
is_correct = check_correctness(pred_fw, pred_migx, args.tolerance,
args.tolerance, args.verbose)
verbose_string = ' Rerun with --verbose for detailed information.' \
if not args.verbose else ''
......