Commit d0202590 authored by Shucai Xiao's avatar Shucai Xiao
Browse files

Merge branch 'test_runner_match_input_output' into migraphx_for_ort

parents 2e43e30b 414ea291
...@@ -183,6 +183,8 @@ struct miopen_apply ...@@ -183,6 +183,8 @@ struct miopen_apply
add_extend_op("softmax"); add_extend_op("softmax");
add_extend_op("topk"); add_extend_op("topk");
add_precompile_op("pointwise");
add_batch_norm_inference_op(); add_batch_norm_inference_op();
add_convolution_op(); add_convolution_op();
add_deconvolution_op(); add_deconvolution_op();
...@@ -381,6 +383,21 @@ struct miopen_apply ...@@ -381,6 +383,21 @@ struct miopen_apply
}); });
} }
// Register a lowering handler that wraps instruction `name` in a
// gpu::precompile_op, deferring kernel compilation to the compile_ops pass.
// The output allocation is appended as the last argument, following the
// MIGraphX GPU calling convention.
void add_precompile_op(const std::string& name)
{
    apply_map.emplace(name, [=](instruction_ref ins) {
        auto args = ins->inputs();
        args.push_back(insert_allocation(ins, ins->get_shape()));
        auto wrapped = make_op("gpu::precompile_op", {{"op", to_value(ins->get_operator())}});
        return mod->replace_instruction(ins, wrapped, args, ins->module_inputs());
    });
}
void add_batch_norm_inference_op() void add_batch_norm_inference_op()
{ {
apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) { apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) {
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <migraphx/eliminate_data_type.hpp> #include <migraphx/eliminate_data_type.hpp>
#include <migraphx/eliminate_identity.hpp> #include <migraphx/eliminate_identity.hpp>
#include <migraphx/eliminate_pad.hpp> #include <migraphx/eliminate_pad.hpp>
#include <migraphx/fuse_pointwise.hpp>
#include <migraphx/inline_module.hpp> #include <migraphx/inline_module.hpp>
#include <migraphx/insert_pad.hpp> #include <migraphx/insert_pad.hpp>
#include <migraphx/memory_coloring.hpp> #include <migraphx/memory_coloring.hpp>
...@@ -25,6 +26,7 @@ ...@@ -25,6 +26,7 @@
#include <migraphx/simplify_qdq.hpp> #include <migraphx/simplify_qdq.hpp>
#include <migraphx/simplify_reshapes.hpp> #include <migraphx/simplify_reshapes.hpp>
#include <migraphx/gpu/allocation_model.hpp> #include <migraphx/gpu/allocation_model.hpp>
#include <migraphx/gpu/compile_ops.hpp>
#include <migraphx/gpu/concat_gpu_opt.hpp> #include <migraphx/gpu/concat_gpu_opt.hpp>
#include <migraphx/gpu/context.hpp> #include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/eliminate_workspace.hpp> #include <migraphx/gpu/eliminate_workspace.hpp>
...@@ -42,6 +44,20 @@ inline namespace MIGRAPHX_INLINE_NS { ...@@ -42,6 +44,20 @@ inline namespace MIGRAPHX_INLINE_NS {
namespace gpu { namespace gpu {
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_SCHEDULE_PASS) MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_SCHEDULE_PASS)
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_ENABLE_POINTWISE_FUSION)
// No-op pass used as a placeholder when an optional pass is disabled,
// keeping the pass pipeline a fixed length.
struct id_pass
{
    std::string name() const { return "id"; }
    // Must be spelled `apply` to satisfy the pass interface; the previous
    // `apple` was a typo, so this member was never invoked as a pass.
    void apply(const module&) const {}
};
// Return `p` when `enabled`, otherwise substitute the no-op id pass so the
// overall pass list keeps the same shape regardless of env-var toggles.
pass enable_pass(bool enabled, pass p)
{
    if(not enabled)
        return id_pass{};
    return p;
}
std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_options& options) const std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_options& options) const
{ {
...@@ -84,6 +100,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti ...@@ -84,6 +100,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
simplify_reshapes{}, simplify_reshapes{},
propagate_constant{}, propagate_constant{},
dead_code_elimination{}, dead_code_elimination{},
enable_pass(enabled(MIGRAPHX_ENABLE_POINTWISE_FUSION{}), fuse_pointwise{}),
dead_code_elimination{},
mlir_conv{&ctx}, mlir_conv{&ctx},
lowering{&ctx, options.offload_copy}, lowering{&ctx, options.offload_copy},
eliminate_contiguous{"gpu::contiguous"}, eliminate_contiguous{"gpu::contiguous"},
...@@ -96,6 +114,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti ...@@ -96,6 +114,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
dead_code_elimination{}, dead_code_elimination{},
fuse_ops{&ctx, options.fast_math}, fuse_ops{&ctx, options.fast_math},
dead_code_elimination{}, dead_code_elimination{},
compile_ops{&ctx},
dead_code_elimination{},
write_literals{&ctx}, write_literals{&ctx},
schedule{gpu::schedule_model{ctx.get_current_device().nstreams()}, not enabled(MIGRAPHX_DISABLE_SCHEDULE_PASS{})}, schedule{gpu::schedule_model{ctx.get_current_device().nstreams()}, not enabled(MIGRAPHX_DISABLE_SCHEDULE_PASS{})},
memory_coloring{"hip::allocate"}, memory_coloring{"hip::allocate"},
......
#include <migraphx/eliminate_contiguous.hpp> #include <migraphx/eliminate_contiguous.hpp>
#include <migraphx/dead_code_elimination.hpp> #include <migraphx/dead_code_elimination.hpp>
#include <migraphx/pass_manager.hpp> #include <migraphx/pass_manager.hpp>
#include <migraphx/instruction.hpp>
#include <basic_ops.hpp> #include <basic_ops.hpp>
#include <migraphx/make_op.hpp> #include <migraphx/make_op.hpp>
#include <pointwise.hpp>
#include <test.hpp> #include <test.hpp>
void run_pass(migraphx::module& m) void run_pass(migraphx::module& m)
...@@ -159,4 +161,25 @@ TEST_CASE(standard_flatten_op) ...@@ -159,4 +161,25 @@ TEST_CASE(standard_flatten_op)
EXPECT(std::distance(m.begin(), m.end()) == (count - 1)); EXPECT(std::distance(m.begin(), m.end()) == (count - 1));
} }
// A contiguous feeding a pointwise op should be absorbed by the pass:
// one instruction disappears and no "contiguous" remains in the module.
TEST_CASE(contiguous_pointwise)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3, 8, 8}};
    migraphx::program p;
    auto* mm = p.get_main_module();
    {
        auto x  = mm->add_parameter("x", s);
        auto y  = mm->add_parameter("y", migraphx::shape{migraphx::shape::float_type, {3}});
        auto yb = mm->add_instruction(
            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {2, 3, 8, 8}}}), y);
        auto yc  = mm->add_instruction(migraphx::make_op("contiguous"), yb);
        auto add = add_pointwise(p, "main:pointwise0", {x, yc}, single_pointwise("add"));
        mm->add_instruction(pass_op{}, add);
    }
    auto before = std::distance(mm->begin(), mm->end());
    run_pass(*mm);
    EXPECT(std::distance(mm->begin(), mm->end()) == (before - 1));
    EXPECT(std::count_if(mm->begin(), mm->end(), [](auto&& ins) {
               return ins.name() == "contiguous";
           }) == 0);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); } int main(int argc, const char* argv[]) { test::run(argc, argv); }
...@@ -7,38 +7,13 @@ ...@@ -7,38 +7,13 @@
#include <migraphx/make_op.hpp> #include <migraphx/make_op.hpp>
#include <test.hpp> #include <test.hpp>
#include <pointwise.hpp>
void run_pass(migraphx::program& p) void run_pass(migraphx::program& p)
{ {
migraphx::run_passes(p, {migraphx::fuse_pointwise{}, migraphx::dead_code_elimination{}}); migraphx::run_passes(p, {migraphx::fuse_pointwise{}, migraphx::dead_code_elimination{}});
} }
template <class F>
migraphx::instruction_ref add_pointwise(migraphx::program& p,
const std::string& name,
std::vector<migraphx::instruction_ref> inputs,
F f)
{
auto* pm = p.create_module(name);
auto* mm = p.get_main_module();
pm->set_bypass();
std::vector<migraphx::instruction_ref> params;
std::transform(inputs.begin(), inputs.end(), std::back_inserter(params), [&](auto input) {
return pm->add_parameter("x" + std::to_string(params.size()),
migraphx::shape{input->get_shape().type()});
});
auto r = f(pm, params);
pm->add_return({r});
return mm->add_instruction(migraphx::make_op("pointwise"), inputs, {pm});
}
auto single_pointwise(const std::string& name)
{
return [=](auto* pm, const auto& inputs) {
return pm->add_instruction(migraphx::make_op(name), inputs);
};
}
TEST_CASE(single) TEST_CASE(single)
{ {
migraphx::shape s{migraphx::shape::float_type, {2, 3}}; migraphx::shape s{migraphx::shape::float_type, {2, 3}};
...@@ -60,9 +35,9 @@ TEST_CASE(single) ...@@ -60,9 +35,9 @@ TEST_CASE(single)
auto x = mm->add_parameter("x", s); auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s); auto y = mm->add_parameter("y", s);
auto z = mm->add_parameter("z", s); auto z = mm->add_parameter("z", s);
auto add1 = add_pointwise(p2, "pointwise0", {x, y}, single_pointwise("add")); auto add1 = add_pointwise(p2, "main:pointwise0", {x, y}, single_pointwise("add"));
auto pass = mm->add_instruction(pass_op{}, add1); auto pass = mm->add_instruction(pass_op{}, add1);
auto add2 = add_pointwise(p2, "pointwise1", {pass, z}, single_pointwise("add")); auto add2 = add_pointwise(p2, "main:pointwise1", {pass, z}, single_pointwise("add"));
mm->add_return({add2}); mm->add_return({add2});
} }
EXPECT(p1 == p2); EXPECT(p1 == p2);
...@@ -84,14 +59,15 @@ TEST_CASE(double_add) ...@@ -84,14 +59,15 @@ TEST_CASE(double_add)
run_pass(p1); run_pass(p1);
migraphx::program p2; migraphx::program p2;
{ {
auto* mm = p2.get_main_module(); auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s); auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s); auto y = mm->add_parameter("y", s);
auto z = mm->add_parameter("z", s); auto z = mm->add_parameter("z", s);
auto fadd = add_pointwise(p2, "pointwise0", {x, y, z}, [=](auto* pm, const auto& inputs) { auto fadd =
auto add1 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]); add_pointwise(p2, "main:pointwise0", {x, y, z}, [=](auto* pm, const auto& inputs) {
return pm->add_instruction(migraphx::make_op("add"), add1, inputs[2]); auto add1 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]);
}); return pm->add_instruction(migraphx::make_op("add"), add1, inputs[2]);
});
mm->add_return({fadd}); mm->add_return({fadd});
} }
EXPECT(p1.sort() == p2.sort()); EXPECT(p1.sort() == p2.sort());
...@@ -117,10 +93,10 @@ TEST_CASE(used_twice_not_fused) ...@@ -117,10 +93,10 @@ TEST_CASE(used_twice_not_fused)
auto* mm = p2.get_main_module(); auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s); auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s); auto y = mm->add_parameter("y", s);
auto add1 = add_pointwise(p2, "pointwise0", {x, y}, single_pointwise("add")); auto add1 = add_pointwise(p2, "main:pointwise0", {x, y}, single_pointwise("add"));
auto pass = mm->add_instruction(pass_op{}, add1); auto pass = mm->add_instruction(pass_op{}, add1);
auto fadd = auto fadd = add_pointwise(
add_pointwise(p2, "pointwise1", {add1, y, pass}, [=](auto* pm, const auto& inputs) { p2, "main:pointwise1", {add1, y, pass}, [=](auto* pm, const auto& inputs) {
auto add2 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]); auto add2 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]);
return pm->add_instruction(migraphx::make_op("add"), inputs[2], add2); return pm->add_instruction(migraphx::make_op("add"), inputs[2], add2);
}); });
...@@ -149,7 +125,7 @@ TEST_CASE(used_twice_fused) ...@@ -149,7 +125,7 @@ TEST_CASE(used_twice_fused)
auto* mm = p2.get_main_module(); auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s); auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s); auto y = mm->add_parameter("y", s);
auto fadd = add_pointwise(p2, "pointwise0", {x, y}, [=](auto* pm, const auto& inputs) { auto fadd = add_pointwise(p2, "main:pointwise0", {x, y}, [=](auto* pm, const auto& inputs) {
auto add1 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]); auto add1 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]);
auto add2 = pm->add_instruction(migraphx::make_op("add"), add1, inputs[0]); auto add2 = pm->add_instruction(migraphx::make_op("add"), add1, inputs[0]);
auto add3 = pm->add_instruction(migraphx::make_op("add"), add1, inputs[1]); auto add3 = pm->add_instruction(migraphx::make_op("add"), add1, inputs[1]);
...@@ -179,11 +155,11 @@ TEST_CASE(duplicate_inputs) ...@@ -179,11 +155,11 @@ TEST_CASE(duplicate_inputs)
auto* mm = p2.get_main_module(); auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s); auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s); auto y = mm->add_parameter("y", s);
auto add1 = add_pointwise(p2, "pointwise0", {x}, [=](auto* pm, const auto& inputs) { auto add1 = add_pointwise(p2, "main:pointwise0", {x}, [=](auto* pm, const auto& inputs) {
return pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[0]); return pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[0]);
}); });
auto pass = mm->add_instruction(pass_op{}, add1); auto pass = mm->add_instruction(pass_op{}, add1);
auto add2 = add_pointwise(p2, "pointwise1", {pass, y}, single_pointwise("add")); auto add2 = add_pointwise(p2, "main:pointwise1", {pass, y}, single_pointwise("add"));
mm->add_return({add2}); mm->add_return({add2});
} }
EXPECT(p1.sort() == p2.sort()); EXPECT(p1.sort() == p2.sort());
...@@ -207,7 +183,35 @@ TEST_CASE(scalar_input) ...@@ -207,7 +183,35 @@ TEST_CASE(scalar_input)
{ {
auto* mm = p2.get_main_module(); auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s); auto x = mm->add_parameter("x", s);
auto add1 = add_pointwise(p2, "pointwise0", {x}, [=](auto* pm, const auto& inputs) { auto add1 = add_pointwise(p2, "main:pointwise0", {x}, [=](auto* pm, const auto& inputs) {
auto y = pm->add_literal(1.0f);
return pm->add_instruction(migraphx::make_op("add"), inputs[0], y);
});
mm->add_return({add1});
}
EXPECT(p1 == p2);
}
TEST_CASE(contiguous_input)
{
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
migraphx::program p1;
{
auto* mm = p1.get_main_module();
auto x = mm->add_parameter("x", s);
auto one = mm->add_literal(1.0f);
auto yb =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), one);
auto y = mm->add_instruction(migraphx::make_op("contiguous"), yb);
auto add1 = mm->add_instruction(migraphx::make_op("add"), x, y);
mm->add_return({add1});
}
run_pass(p1);
migraphx::program p2;
{
auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s);
auto add1 = add_pointwise(p2, "main:pointwise0", {x}, [=](auto* pm, const auto& inputs) {
auto y = pm->add_literal(1.0f); auto y = pm->add_literal(1.0f);
return pm->add_instruction(migraphx::make_op("add"), inputs[0], y); return pm->add_instruction(migraphx::make_op("add"), inputs[0], y);
}); });
...@@ -216,4 +220,32 @@ TEST_CASE(scalar_input) ...@@ -216,4 +220,32 @@ TEST_CASE(scalar_input)
EXPECT(p1 == p2); EXPECT(p1 == p2);
} }
// Fusing an add whose inputs are all scalars must not change the output
// shape: the fused program still yields a single scalar.
TEST_CASE(all_scalar_input)
{
    migraphx::shape s{migraphx::shape::float_type};
    migraphx::program p1;
    {
        auto* mm = p1.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto y   = mm->add_parameter("y", s);
        auto sum = mm->add_instruction(migraphx::make_op("add"), x, y);
        mm->add_return({sum});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto y   = mm->add_parameter("y", s);
        auto sum =
            add_pointwise(p2, "main:pointwise0", {x, y}, [=](auto* pm, const auto& inputs) {
                return pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]);
            });
        mm->add_return({sum});
    }
    EXPECT(p1.get_output_shapes().size() == 1);
    EXPECT(p1.get_output_shapes().front().scalar());
    EXPECT(p1.get_output_shapes() == p2.get_output_shapes());
    EXPECT(p1 == p2);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); } int main(int argc, const char* argv[]) { test::run(argc, argv); }
File mode changed from 100644 to 100755
#ifndef MIGRAPHX_GUARD_TEST_INCLUDE_POINTWISE_HPP
#define MIGRAPHX_GUARD_TEST_INCLUDE_POINTWISE_HPP

// Include-what-you-use: these were previously pulled in transitively.
#include <algorithm>
#include <iterator>
#include <string>
#include <vector>

#include <migraphx/program.hpp>
#include <migraphx/module.hpp>
#include <migraphx/make_op.hpp>

/// Create a pointwise sub-module named `name` in program `p`, populate it by
/// calling `f(pm, params)` (where `params` are scalar parameters x0, x1, ...
/// mirroring `inputs`), and insert a "pointwise" instruction referencing the
/// new module into the main module. Returns that instruction.
template <class F>
migraphx::instruction_ref add_pointwise(migraphx::program& p,
                                        const std::string& name,
                                        std::vector<migraphx::instruction_ref> inputs,
                                        F f)
{
    auto* pm = p.create_module(name);
    auto* mm = p.get_main_module();
    pm->set_bypass();
    std::vector<migraphx::instruction_ref> params;
    std::transform(inputs.begin(), inputs.end(), std::back_inserter(params), [&](auto input) {
        // Only the element type is kept: pointwise modules compute per element,
        // so their parameters are scalar-shaped.
        return pm->add_parameter("x" + std::to_string(params.size()),
                                 migraphx::shape{input->get_shape().type()});
    });
    auto r = f(pm, params);
    pm->add_return({r});
    return mm->add_instruction(migraphx::make_op("pointwise"), inputs, {pm});
}

/// Return a builder that adds a single op `name` over all module parameters.
inline auto single_pointwise(const std::string& name)
{
    return [=](auto* pm, const auto& inputs) {
        return pm->add_instruction(migraphx::make_op(name), inputs);
    };
}

#endif // MIGRAPHX_GUARD_TEST_INCLUDE_POINTWISE_HPP
...@@ -19,6 +19,7 @@ TEST_CASE(perf_report) ...@@ -19,6 +19,7 @@ TEST_CASE(perf_report)
std::string output = ss.str(); std::string output = ss.str();
EXPECT(migraphx::contains(output, "Summary:")); EXPECT(migraphx::contains(output, "Summary:"));
EXPECT(migraphx::contains(output, "Batch size:"));
EXPECT(migraphx::contains(output, "Rate:")); EXPECT(migraphx::contains(output, "Rate:"));
EXPECT(migraphx::contains(output, "Total time:")); EXPECT(migraphx::contains(output, "Total time:"));
EXPECT(migraphx::contains(output, "Total instructions time:")); EXPECT(migraphx::contains(output, "Total instructions time:"));
......
#include <migraphx/stringutils.hpp>
#include <test.hpp>
// One placeholder in the middle of the text is substituted.
TEST_CASE(interpolate_string_simple1)
{
    const std::string templ = "Hello ${w}!";
    const auto result       = migraphx::interpolate_string(templ, {{"w", "world"}});
    EXPECT(result == "Hello world!");
}
// The whole input being a single placeholder is fully replaced.
TEST_CASE(interpolate_string_simple2)
{
    const std::string templ = "${hello}";
    const auto result       = migraphx::interpolate_string(templ, {{"hello", "bye"}});
    EXPECT(result == "bye");
}
// An opening "${" with no closing brace must throw.
TEST_CASE(interpolate_string_unbalanced)
{
    const std::string templ = "${hello";
    EXPECT(test::throws([&] { migraphx::interpolate_string(templ, {{"hello", "bye"}}); }));
}
// Whitespace inside the delimiters is trimmed before key lookup.
TEST_CASE(interpolate_string_extra_space)
{
    const std::string templ = "${ hello }";
    const auto result       = migraphx::interpolate_string(templ, {{"hello", "bye"}});
    EXPECT(result == "bye");
}
// Several distinct placeholders are all substituted.
TEST_CASE(interpolate_string_multiple)
{
    const std::string templ = "${h} ${w}!";
    const auto result       = migraphx::interpolate_string(templ, {{"w", "world"}, {"h", "Hello"}});
    EXPECT(result == "Hello world!");
}
// Back-to-back placeholders with no separator are both substituted.
TEST_CASE(interpolate_string_next)
{
    const std::string templ = "${hh}${ww}!";
    const auto result       = migraphx::interpolate_string(templ, {{"ww", "world"}, {"hh", "Hello"}});
    EXPECT(result == "Helloworld!");
}
// A bare '$' without braces is not a placeholder and passes through.
TEST_CASE(interpolate_string_dollar_sign)
{
    const std::string templ = "$hello";
    const auto result       = migraphx::interpolate_string(templ, {{"hello", "bye"}});
    EXPECT(result == "$hello");
}
// A placeholder whose key is absent from the map must throw.
TEST_CASE(interpolate_string_missing)
{
    const std::string templ = "${hello}";
    EXPECT(test::throws([&] { migraphx::interpolate_string(templ, {{"h", "bye"}}); }));
}
// Custom two-character delimiters.
TEST_CASE(interpolate_string_custom1)
{
    const std::string templ = "****{{a}}****";
    const auto result       = migraphx::interpolate_string(templ, {{"a", "b"}}, "{{", "}}");
    EXPECT(result == "****b****");
}
// Custom three-character delimiters.
TEST_CASE(interpolate_string_custom2)
{
    const std::string templ = "****{{{a}}}****";
    const auto result       = migraphx::interpolate_string(templ, {{"a", "b"}}, "{{{", "}}}");
    EXPECT(result == "****b****");
}
// Custom four-character delimiters.
TEST_CASE(interpolate_string_custom3)
{
    const std::string templ = "****{{{{a}}}}****";
    const auto result       = migraphx::interpolate_string(templ, {{"a", "b"}}, "{{{{", "}}}}");
    EXPECT(result == "****b****");
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
...@@ -45,14 +45,6 @@ int main(int argc, const char* argv[]) ...@@ -45,14 +45,6 @@ int main(int argc, const char* argv[])
run_verify rv; run_verify rv;
rv.add_validation_for("gpu", &validate_gpu); rv.add_validation_for("gpu", &validate_gpu);
rv.disable_test_for("cpu", {"test_if_lp", "test_if_param", "test_if_literal"}); rv.disable_test_for("cpu", {"test_if_lp", "test_if_param", "test_if_literal"});
rv.disable_test_for("gpu", rv.disable_test_for("gpu", {"test_conv_bn_add"});
{"batch_quant_dot_2",
"batch_quant_dot_3",
"batch_quant_dot_5",
"quant_dot_3args_1",
"quant_dot_3args_2",
"quant_dot_3args_3",
"quant_dot_3args_4",
"quant_dot_3args_5"});
rv.run(argc, argv); rv.run(argc, argv);
} }
...@@ -2,44 +2,44 @@ ...@@ -2,44 +2,44 @@
#include "verify_program.hpp" #include "verify_program.hpp"
#include <migraphx/program.hpp> #include <migraphx/program.hpp>
#include <migraphx/generate.hpp> #include <migraphx/generate.hpp>
#include <migraphx/op/common.hpp> #include <migraphx/make_op.hpp>
// struct test_conv_bn_add : verify_program<test_conv_bn_add> struct test_conv_bn_add : verify_program<test_conv_bn_add>
// { {
// static migraphx::instruction_ref add_bn(migraphx::program& p, static migraphx::instruction_ref add_bn(migraphx::module& m,
// migraphx::instruction_ref x, migraphx::instruction_ref x,
// std::size_t channels, std::size_t channels,
// std::size_t seed = 1) std::size_t seed = 1)
// { {
// migraphx::shape vars{migraphx::shape::float_type, {channels}}; migraphx::shape vars{migraphx::shape::float_type, {channels}};
// auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + auto scale = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + seed)));
// seed))); auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 auto bias = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + seed)));
// + seed))); auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, auto mean = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + seed)));
// 3 + seed))); auto variance = auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + seed)));
// mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + seed))); return return m.add_instruction(
// mm->add_instruction( migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
// migraphx::op::batch_norm_inference{}, x, scale, bias, mean, variance); }
// }
// migraphx::program create_program() const migraphx::program create_program() const
// { {
// migraphx::program p; migraphx::program p;
// std::size_t ichannels = 64; auto* mm = p.get_main_module();
// std::size_t ochannels = 256; std::size_t ichannels = 64;
// auto x = mm->add_parameter("x", {migraphx::shape::float_type, {1, ichannels, 56, std::size_t ochannels = 256;
// 56}}); auto w = mm->add_literal(migraphx::generate_literal( auto x = mm->add_parameter("x", {migraphx::shape::float_type, {1, ichannels, 56, 56}});
// {migraphx::shape::float_type, {ochannels, ichannels, 1, 1}}, 1)); auto w = mm->add_literal(migraphx::generate_literal(
// auto y = mm->add_parameter("y", {migraphx::shape::float_type, {1, ichannels, 56, {migraphx::shape::float_type, {ochannels, ichannels, 1, 1}}, 1));
// 56}}); auto v = mm->add_literal(migraphx::generate_literal( auto y = mm->add_parameter("y", {migraphx::shape::float_type, {1, ichannels, 56, 56}});
// {migraphx::shape::float_type, {ochannels, ichannels, 1, 1}}, 2)); auto v = mm->add_literal(migraphx::generate_literal(
// auto relu1 = mm->add_instruction(migraphx::op::relu{}, x); {migraphx::shape::float_type, {ochannels, ichannels, 1, 1}}, 2));
// auto conv1 = mm->add_instruction(migraphx::op::convolution{}, relu1, w); auto relu1 = mm->add_instruction(migraphx::make_op("relu"), x);
// auto bn1 = add_bn(p, conv1, ochannels, 1); auto conv1 = mm->add_instruction(migraphx::make_op("convolution"), relu1, w);
// auto relu2 = mm->add_instruction(migraphx::op::relu{}, y); auto bn1 = add_bn(*mm, conv1, ochannels, 1);
// auto conv2 = mm->add_instruction(migraphx::op::convolution{}, relu2, v); auto relu2 = mm->add_instruction(migraphx::make_op("relu"), y);
// auto bn2 = add_bn(p, conv2, ochannels, 1); auto conv2 = mm->add_instruction(migraphx::make_op("convolution"), relu2, v);
// auto sum = mm->add_instruction(migraphx::op::add{}, bn1, bn2); auto bn2 = add_bn(*mm, conv2, ochannels, 1);
// mm->add_instruction(migraphx::op::relu{}, sum); auto sum = mm->add_instruction(migraphx::make_op("add"), bn1, bn2);
// return p; mm->add_instruction(migraphx::make_op("relu"), sum);
// } return p;
// }; }
};
...@@ -103,79 +103,69 @@ auto operator==(const T& x, const U& y) -> decltype(x.name() == y.name()) ...@@ -103,79 +103,69 @@ auto operator==(const T& x, const U& y) -> decltype(x.name() == y.name())
} // namespace operation_operators } // namespace operation_operators
template <class T> template <class T>
auto normalize_compute_shape_op(rank<1>, const T& x, const std::vector<shape>& inputs) auto compute_shape_op(rank<3>, const T& x, const std::vector<shape>& inputs)
-> decltype(x.normalize_compute_shape(inputs)) -> decltype(x.compute_shape(inputs))
{
dependent_type<operation, T> y = x;
normalize_attributes(y, inputs[0].lens());
return any_cast<T>(y).normalize_compute_shape(inputs);
}
template <class T>
shape normalize_compute_shape_op(rank<0>, const T& x, const std::vector<shape>&)
{ {
std::string name = x.name(); return x.compute_shape(inputs);
MIGRAPHX_THROW("Shape not computable: " + name);
} }
template <class T> template <class T>
shape normalize_compute_shape_op(const T& x, const std::vector<shape>& inputs) auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
-> decltype(x.normalize_compute_shape(inputs))
{ {
return normalize_compute_shape_op(rank<1>{}, x, inputs); dependent_type<operation, T> y = x;
normalize_attributes(y, inputs[0].lens());
return any_cast<T>(y).normalize_compute_shape(inputs);
} }
template <class T> template <class T>
auto compute_shape_op(rank<1>, auto compute_shape_op(rank<1>, const T& x, const std::vector<shape>& inputs)
const T& x, -> decltype(x.compute_shape(inputs, {}))
const std::vector<shape>& inputs,
const std::vector<module_ref>& mod_args)
-> decltype(x.compute_shape(inputs, mod_args))
{ {
return x.compute_shape(inputs, mod_args); return x.compute_shape(inputs, {});
} }
template <class T> template <class T>
shape shape compute_shape_op(rank<0>, const T& x, const std::vector<shape>&)
compute_shape_op(rank<0>, const T& x, const std::vector<shape>&, const std::vector<module_ref>&)
{ {
std::string name = x.name(); std::string name = x.name();
MIGRAPHX_THROW("Shape not computable: " + name); MIGRAPHX_THROW("Shape not computable: " + name);
} }
template <class T> template <class T>
shape compute_shape_op(const T& x, shape compute_shape_op(const T& x, const std::vector<shape>& inputs)
const std::vector<shape>& inputs,
const std::vector<module_ref>& mod_args)
{ {
return compute_shape_op(rank<1>{}, x, inputs, mod_args); return compute_shape_op(rank<3>{}, x, inputs);
} }
template <class T> template <class T>
auto normalize_compute_shape_op(rank<1>, auto mod_compute_shape_op(rank<1>,
const T& x, const T& x,
const std::vector<shape>& inputs, const std::vector<shape>& inputs,
std::vector<module_ref>& mod_args) const std::vector<module_ref>& mod_args)
-> decltype(x.normalize_compute_shape(inputs, mod_args)) -> decltype(x.compute_shape(inputs, mod_args))
{ {
return x.normalize_compute_shape(inputs, mod_args); return x.compute_shape(inputs, mod_args);
} }
template <class T> template <class T>
shape normalize_compute_shape_op(rank<0>, shape mod_compute_shape_op(rank<0>,
const T& x, const T& x,
const std::vector<shape>&, const std::vector<shape>& inputs,
const std::vector<module_ref>&) const std::vector<module_ref>& mod_args)
{ {
if(mod_args.empty())
return compute_shape_op(x, inputs);
std::string name = x.name(); std::string name = x.name();
MIGRAPHX_THROW("Shape not computable: " + name); MIGRAPHX_THROW("Shape not computable: " + name);
} }
template <class T> template <class T>
shape normalize_compute_shape_op(const T& x, shape mod_compute_shape_op(const T& x,
const std::vector<shape>& inputs, const std::vector<shape>& inputs,
std::vector<module_ref>& mod_args) const std::vector<module_ref>& mod_args)
{ {
return normalize_compute_shape_op(rank<1>{}, x, inputs, mod_args); return mod_compute_shape_op(rank<1>{}, x, inputs, mod_args);
} }
template <class T> template <class T>
...@@ -488,13 +478,13 @@ lifetime get_lifetime_op(const T&) ...@@ -488,13 +478,13 @@ lifetime get_lifetime_op(const T&)
returns = 'shape', returns = 'shape',
input = 'const std::vector<shape>&', input = 'const std::vector<shape>&',
const = True, const = True,
default = 'detail::normalize_compute_shape_op'), default = 'detail::compute_shape_op'),
virtual('compute_shape', virtual('compute_shape',
returns = 'shape', returns = 'shape',
inputs = 'const std::vector<shape>&', inputs = 'const std::vector<shape>&',
mod_args = 'const std::vector<module_ref>&', mod_args = 'const std::vector<module_ref>&',
const = True, const = True,
default = 'detail::compute_shape_op'), default = 'detail::mod_compute_shape_op'),
virtual('compute', virtual('compute',
returns = 'argument', returns = 'argument',
ctx = 'context&', ctx = 'context&',
...@@ -582,7 +572,7 @@ template <class T> ...@@ -582,7 +572,7 @@ template <class T>
inline auto compute_shape(const T& op, const std::vector<shape>& inputs) inline auto compute_shape(const T& op, const std::vector<shape>& inputs)
-> decltype(op.normalize_compute_shape(inputs)) -> decltype(op.normalize_compute_shape(inputs))
{ {
return detail::normalize_compute_shape_op(op, inputs); return detail::compute_shape_op(op, inputs);
} }
inline shape compute_shape(const operation& op, inline shape compute_shape(const operation& op,
...@@ -607,7 +597,7 @@ inline auto compute_shape(const T& op, ...@@ -607,7 +597,7 @@ inline auto compute_shape(const T& op,
const std::vector<module_ref>& mod_args) const std::vector<module_ref>& mod_args)
-> decltype(op.normalize_compute_shape(inputs, mod_args)) -> decltype(op.normalize_compute_shape(inputs, mod_args))
{ {
return detail::normalize_compute_shape_op(op, inputs, mod_args); return detail::compute_shape_op(op, inputs, mod_args);
} }
inline bool is_context_free(const operation& op) { return op.is_context_free(); } inline bool is_context_free(const operation& op) { return op.is_context_free(); }
......
#!/usr/bin/env python3
import json
import argparse
import os
from sys import argv as sysargs
from sys import version_info as python_version
from sys import exit as sys_exit
import pandas as pd
from datetime import datetime
import venv
import shutil
# Abort on interpreters older than 3.6 (f-strings are used later in this
# script). The second clause must compare the minor version when the major
# version is exactly 3; the previous code repeated `python_version[0] < 3`,
# which made the minor-version check unreachable.
if (python_version[0] < 3) or (python_version[0] == 3
                               and python_version[1] < 6):
    raise Exception("Please utilize Python version 3.6 and above. Exiting...")
def parse_args():
    """Build and parse the command-line options for the ROCTX marker tool.

    Returns the argparse namespace with: json_path, out, study_name,
    repeat (default 2), parse, clean, run, debug.
    """
    ap = argparse.ArgumentParser(
        description="Parser for MIGraphX ROCTX Markers")
    ap.add_argument('--json-path',
                    type=str,
                    metavar='json_path',
                    help='Path to json file')
    ap.add_argument('--out',
                    type=str,
                    metavar='out',
                    help='Output directory for run.')
    ap.add_argument(
        '--study-name',
        type=str,
        metavar='study-name',
        help='Study-name is used for naming the output CSV file.')
    ap.add_argument('--repeat',
                    type=int,
                    metavar='repeat',
                    help='Defines number of runs.',
                    default=2)
    ap.add_argument('--parse',
                    default=False,
                    action='store_true',
                    help='Parses given JSON file.')
    ap.add_argument('--clean',
                    default=False,
                    action='store_true',
                    help='Removes temporary paths')
    ap.add_argument('--run',
                    type=str,
                    metavar='run',
                    help='Enables run and fetches run configs.')
    ap.add_argument('--debug', default=False, action='store_true')
    return ap.parse_args()
# Parse the CLI once at import time; `args` is then read as a module-wide
# global by the helper functions below (e.g. args.debug in parse()).
args = parse_args()

# Require at least one CLI argument: this tool does nothing useful when
# invoked bare, so treat that as a usage error.
if not len(sysargs) > 1:
    raise Exception("No arg is passed. Exiting...")
def _safe_stat(fn, values, default=0):
    """Apply fn (e.g. max/min) to values; return default for empty lists."""
    try:
        return fn(values)
    except ValueError:
        return default


def parse(file):
    """Parse a rocprof/rocTX JSON trace and aggregate per-marker timings.

    Reads the trace events from `file`, finds the first ROCTX marker,
    attributes subsequent kernel executions to the run, and collects
    per-marker duration statistics.

    Returns a tuple (df2, total_time, max_kernel_info):
      df2             -- pandas DataFrame indexed by marker name with
                         SUM/MIN/MAX/COUNT/MAX_INDEX/MAX_OCCUR columns,
                         sorted by SUM descending.
      total_time      -- sum of all marker durations (microseconds).
      max_kernel_info -- trace entry of the longest-running kernel.

    Raises Exception when no marker or no kernel execution is found.
    Reads the module-level `args` for debug output.
    """
    with open(file, "r") as read_file:
        data = json.load(read_file)

    # Collect unique marker names and the timestamp of the first marker;
    # only kernels launched at/after that point belong to the run.
    list_names = []
    first_marker = True
    first_marker_time = 0
    for i in data:
        if (i):
            if ("Marker start:" in i['name']) and (
                    i['name'] not in list_names):
                list_names.append(i['name'])
                if first_marker:
                    first_marker_time = i['ts']
                    first_marker = False
    if (args.debug):
        print(f"FIRST MARKER TIME DETERMINED: {first_marker_time}")
    if (first_marker_time == 0):
        # Bug fix: the original raised a bare string, which is itself a
        # TypeError ("exceptions must derive from BaseException").
        raise Exception("FIRST MARKER TIME IS ZERO. EXITING...")

    kernel_launch_info = []  # kernel description
    kernel_launch_list = []  # kernel launch details
    kernel_launch_time = []  # kernel execution time
    for i in data:
        if (i and i.get('args')):
            try:
                if (("KernelExecution" in i['args']['desc'])
                        and (i['ts'] >= first_marker_time)):
                    kernel_launch_info.append(i['args']['desc'])
                    kernel_launch_list.append(i)
                    kernel_launch_time.append(int(i['dur']))
            except Exception:
                # Entries missing 'desc'/'ts'/'dur' are simply skipped.
                continue
    if not kernel_launch_time:
        # Robustness: max() on an empty list would raise an opaque
        # ValueError; fail with an explicit message instead.
        raise Exception("NO KERNEL EXECUTION FOUND AFTER FIRST MARKER.")
    max_index = kernel_launch_time.index(max(kernel_launch_time))
    max_kernel_info = kernel_launch_list[max_index]
    if (args.debug):
        with open('rocTX_kernel_launch_list.txt', 'w') as f:
            for i in kernel_launch_list:
                f.write(f'{i}')

    # Get timing information for each marker name. A name can match on
    # the gpu or cpu side; for gpu markers we need the gpu-side entries
    # ("UserMarker frame:"), for cpu markers the "Marker start:" entries.
    list_times_per_names = []
    for name in list_names:
        times = []
        for entry in data:
            if not entry or entry.get('name') != name:
                continue
            # Robustness: tolerate entries without an 'args'/'desc' field
            # (the original indexed entry['args']['desc'] unguarded).
            desc = entry.get('args', {}).get('desc', '')
            if ("gpu::" in name) and ("UserMarker frame:" in desc):
                times.append(int(entry.get('dur')))  # gpu side information
            elif ("gpu::" not in name) and ("Marker start:" in desc):
                times.append(int(entry.get('dur')))  # cpu side information
        list_times_per_names.append(times)
    if (args.debug):
        print(list_times_per_names)

    # Aggregate stats per marker name (addresses the original TODO:
    # the repeated try/except loops are folded into comprehensions; also
    # stops shadowing the builtin `list`). Empty lists fall back to 0.
    sum_per_name = [sum(times) for times in list_times_per_names]
    count_per_name = [len(times) for times in list_times_per_names]
    max_per_name = [_safe_stat(max, times) for times in list_times_per_names]
    min_per_name = [_safe_stat(min, times) for times in list_times_per_names]
    max_index_per_name = [
        times.index(max(times)) if times else 0
        for times in list_times_per_names
    ]
    max_occur_per_name = [
        times.count(max(times)) if times else 0
        for times in list_times_per_names
    ]
    total_time = sum(sum_per_name)

    d = {
        'SUM': sum_per_name,
        'MIN': min_per_name,
        'MAX': max_per_name,
        'COUNT': count_per_name,
        'MAX_INDEX': max_index_per_name,
        'MAX_OCCUR': max_occur_per_name
    }
    df2 = pd.DataFrame(d)
    df2.index = list_names
    df2.sort_values(by=['SUM'], inplace=True, ascending=False)
    if (args.debug):
        print(df2)
        print(f"\nTOTAL TIME: {total_time} us")
    return df2, total_time, max_kernel_info
def run():
    """Profile the configured migraphx-driver command with rocprof.

    Repeats the rocprof invocation args.repeat times, writing traces
    into args.out. Raises when the repeat count is zero/None/infinite.
    """
    repeat_count = args.repeat
    # `not repeat_count` already covers 0 and None.
    if not repeat_count or repeat_count == float('inf'):
        raise Exception("REPEAT COUNT CANNOT BE ZERO/INFINITY/NULL")
    run_args = args.run
    # rocprof configuration flags
    configs = '--hip-trace --roctx-trace --flush-rate 10ms --timestamp on'
    command = ' '.join([
        'rocprof',
        configs,
        f"-d {args.out}",
        f"/opt/rocm/bin/migraphx-driver roctx {run_args}",
    ])
    for _ in range(repeat_count):
        os.system(command)
    print("RUN COMPLETE.")
def clean():
    """Remove the temporary profiling workspace under /tmp.

    Robustness fix: the original unconditionally called shutil.rmtree
    with ignore_errors=False, so `--clean` crashed with FileNotFoundError
    when the path was already gone. Skip the removal when the directory
    does not exist; real failures (e.g. permissions) still propagate.
    """
    path = '/tmp/rocm-profile-data/'
    if os.path.exists(path):
        shutil.rmtree(path, ignore_errors=False)
def main():
    """Entry point: dispatch on --clean / --run / --parse.

    --clean : remove the /tmp workspace and exit.
    --run   : set up a venv + rocmProfileData, profile the command
              args.repeat times, parse every trace and append averaged
              per-marker statistics to a CSV.
    --parse : parse a single JSON trace given by --json-path.
    """
    if (args.clean):
        clean()
        sys_exit()
    # A fresh venv isolates the rocpd install from the system Python.
    print("Initiating virtual environment...")
    builder = venv.EnvBuilder(clear=True, with_pip=True)
    builder.create('/tmp/rocm-profile-data/py/')
    python_bin = '/tmp/rocm-profile-data/py' + '/bin/python'
    file = args.json_path
    if (args.study_name):
        filename = args.study_name + ".csv"
    else:
        filename = "output" + datetime.now().strftime(
            "%Y_%m_%d-%I:%M:%S_%p") + ".csv"
    # Record the profiled command at the top of the CSV.
    with open(filename, 'a') as f:
        f.write(f"{args.run}\n")
    if (args.run):
        curr = os.path.abspath(os.getcwd())
        rpd_path = '/tmp/rocm-profile-data/rocmProfileData/'
        if not os.path.exists(rpd_path):
            print("rocmProfileData DOES NOT EXIST. CLONING...")
            os.system(
                f"git clone https://github.com/ROCmSoftwarePlatform/rocmProfileData.git {rpd_path}"
            )
        # Install the rocpd import tooling into the venv.
        os.chdir(rpd_path + "rocpd_python/")
        os.system(python_bin + ' -m pip install --upgrade pip')
        os.system(python_bin + ' setup.py install')
        os.chdir(curr)
        run()
        # The newest args.repeat result directories are the runs above.
        os.chdir(curr + f"/{args.out}/")
        out_path = os.popen(f"ls -td $PWD/*/*/ | head -{args.repeat}").read()
        print(f"\nFOLLOWING PATHS WILL BE PARSED:\n{out_path}")
        out_path = out_path.splitlines()
        df_tot = pd.DataFrame()
        tot_time = []
        max_kernel_info_list = []
        for path in out_path:
            path = path.strip('\n')
            print("\nPARSING OUTPUT PATH: " + path)
            os.chdir(path)
            # Convert the raw rocprof text traces to rpd, then to JSON.
            os.system(
                f"{python_bin} -m rocpd.rocprofiler_import --ops_input_file hcc_ops_trace.txt --api_input_file hip_api_trace.txt --roctx_input_file roctx_trace.txt trace.rpd"
            )
            os.system(
                f"{python_bin} {rpd_path}/rpd2tracing.py trace.rpd trace.json")
            os.chdir(curr)
            df, total_time, path_max_kernel_info = parse(path + "trace.json")
            max_kernel_info_list.append(path_max_kernel_info)
            tot_time.append(total_time)
            # Outer-join on marker name so runs with differing marker
            # sets still line up; columns get _x/_y style suffixes.
            df_tot = pd.merge(df_tot,
                              df,
                              how='outer',
                              left_index=True,
                              right_index=True)
            if (args.debug):
                print("JSON FILE PATH: " + path + "trace.json")
        df_tot.to_csv("rocTX_runs_dataframe.csv")
        if (args.debug):
            print(df_tot)
        # Average the per-run columns into a single summary frame.
        tmp_sum = df_tot.loc[:, df_tot.columns.str.contains('SUM')].astype(int)
        tmp_min = df_tot.loc[:, df_tot.columns.str.contains('MIN')].astype(int)
        tmp_max = df_tot.loc[:, df_tot.columns.str.match("^MAX_.$")].astype(
            int)
        tmp_count = df_tot.loc[:, df_tot.columns.str.match("COUNT")].astype(
            int)
        tmp_sum['SUM_avg'] = tmp_sum.mean(axis=1).astype(int)
        tmp_min['MIN_avg'] = tmp_min.mean(axis=1).astype(int)
        tmp_max['MAX_avg'] = tmp_max.mean(axis=1).astype(int)
        df2 = tmp_sum['SUM_avg'].copy()
        df2 = pd.merge(df2,
                       tmp_min['MIN_avg'],
                       how='outer',
                       left_index=True,
                       right_index=True)
        df2 = pd.merge(df2,
                       tmp_max['MAX_avg'],
                       how='outer',
                       left_index=True,
                       right_index=True)
        df2 = pd.merge(df2,
                       tmp_count['COUNT_x'],
                       how='outer',
                       left_index=True,
                       right_index=True)
        df2.rename(columns={'COUNT_x': 'COUNT'}, inplace=True)
        df2 = df2.loc[:, ~df2.columns.duplicated(
        )]  #there will be many COUNT_x in df2
        df2.sort_values(by=['SUM_avg'], inplace=True, ascending=False)
        if (args.debug):
            pd.set_option('display.max_columns', None)
            print(df_tot)  #all data from all runs
        print("\n*** RESULTS ***")
        print(df2)
        out_time = sum(tot_time) / len(tot_time)
        print(f"\nAVG TOTAL TIME: {out_time} us\n")
        df2.to_csv(filename, mode='a')
        with open(filename, 'a') as f:
            f.write(f"AVG TOTAL TIME: {out_time} us\n")
        # Bug fix: this f-string had no placeholder (printed a literal
        # "(unknown)"); report the actual output CSV file name.
        print(f"OUTPUT CSV FILE:\t{filename}")
        if (args.debug):
            #kernels that took the longest time printed
            for item in max_kernel_info_list:
                print(f"KERNEL NAME: {item['name']}\t\t{item['dur']}")
            with open('rocTX_kernel_timing_details.txt', 'w') as f:
                f.write(
                    "MOST TIME CONSUMING KERNELS IN EACH ITERATION (EXPECTED TO BE SAME KERNEL):\n"
                )
                for i in max_kernel_info_list:
                    f.write(f"KERNEL NAME: {i['name']}\t\t{i['dur']}\n")
            print("KERNEL TIMING DETAILS:\trocTX_kernel_timing_details.txt")
            print("ALL DATA FROM ALL RUNS:\trocTX_runs_dataframe.csv")
    elif (args.parse):
        if not (file):
            raise Exception("JSON PATH IS NOT PROVIDED FOR PARSING.")
        parse(file)
    else:
        raise Exception("PLEASE PROVIDE A COMMAND: RUN, PARSE, CLEAN")


if __name__ == "__main__":
    main()
import os import os, sys
import numpy as np import numpy as np
import argparse import argparse
import onnx import onnx
...@@ -54,36 +54,112 @@ def read_pb_file(filename): ...@@ -54,36 +54,112 @@ def read_pb_file(filename):
tensor.ParseFromString(data_str) tensor.ParseFromString(data_str)
np_array = numpy_helper.to_array(tensor) np_array = numpy_helper.to_array(tensor)
return np_array return tensor.name, np_array
def wrapup_inputs(io_folder, parameter_names): def wrapup_inputs(io_folder, param_names):
index = 0
param_map = {} param_map = {}
for param_name in parameter_names: data_array = []
file_name = io_folder + '/input_' + str(index) + '.pb' name_array = []
data = read_pb_file(file_name) for i in range(len(param_names)):
param_map[param_name] = data file_name = io_folder + '/input_' + str(i) + '.pb'
index = index + 1 name, data = read_pb_file(file_name)
param_map[name] = data
data_array.append(data)
if name:
name_array.append(name)
if len(name_array) < len(data_array):
param_map = {}
for i in range(len(param_names)):
param_map[param_names[i]] = data_array[i]
return param_map
for name in param_names:
if not name in param_map.keys():
print("Input {} does not exist!".format(name))
sys.exit()
return param_map return param_map
def read_outputs(io_folder, out_num): def read_outputs(io_folder, out_names):
outputs = [] outputs = []
for i in range(out_num): data_array = []
name_array = []
for i in range(len(out_names)):
file_name = io_folder + '/output_' + str(i) + '.pb' file_name = io_folder + '/output_' + str(i) + '.pb'
data = read_pb_file(file_name) name, data = read_pb_file(file_name)
outputs.append(data) data_array.append(data)
if name:
name_array.append(name)
if len(name_array) < len(data_array):
return data_array
for name in out_names:
index = name_array.index(name)
outputs.append(data_array[index])
return outputs return outputs
def model_parameter_names(model_file_name):
with open(model_file_name, 'rb') as pfile:
data_str = pfile.read()
model_proto = onnx.ModelProto()
model_proto.ParseFromString(data_str)
init_names = set([(i.name) for i in model_proto.graph.initializer])
param_names = [
input.name for input in model_proto.graph.input
if input.name not in init_names
]
return param_names
def model_output_names(model_file_name):
with open(model_file_name, 'rb') as pfile:
data_str = pfile.read()
model_proto = onnx.ModelProto()
model_proto.ParseFromString(data_str)
output_names = [out.name for out in model_proto.graph.output]
return output_names
def get_input_shapes(sample_case, param_names):
param_shape_map = {}
name_array = []
shape_array = []
for i in range(len(param_names)):
file_name = sample_case + '/input_' + str(i) + '.pb'
name, data = read_pb_file(file_name)
param_shape_map[name] = data.shape
shape_array.append(data.shape)
if name:
name_array.append(name)
if len(name_array) < len(shape_array):
param_shape_map = {}
for i in range(len(param_names)):
param_shape_map[param_names[i]] = shape_array[i]
return param_shape_map
for name in param_names:
if not name in param_shape_map:
print("Input {} does not exist!".format(name))
sys.exit()
return param_shape_map
def run_one_case(model, param_map): def run_one_case(model, param_map):
# convert np array to model argument # convert np array to model argument
pp = {} pp = {}
for key, val in param_map.items(): for key, val in param_map.items():
print("input = {}".format(val))
pp[key] = migraphx.argument(val) pp[key] = migraphx.argument(val)
# run the model # run the model
...@@ -106,12 +182,11 @@ def check_correctness(gold_outputs, outputs, rtol=1e-3, atol=1e-3): ...@@ -106,12 +182,11 @@ def check_correctness(gold_outputs, outputs, rtol=1e-3, atol=1e-3):
out_num = len(gold_outputs) out_num = len(gold_outputs)
ret = True ret = True
for i in range(out_num): for i in range(out_num):
print("Expected value: \n{}".format(gold_outputs[i]))
print("Actual value: \n{}".format(outputs[i]))
if not np.allclose(gold_outputs[i], outputs[i], rtol, atol): if not np.allclose(gold_outputs[i], outputs[i], rtol, atol):
print("Output {} is incorrect ...".format(i)) print("\nOutput {} is incorrect ...".format(i))
print("Expected value: \n{}".format(gold_outputs[i])) print("Expected value: \n{}".format(gold_outputs[i]))
print("Actual value: \n{}".format(outputs[i])) print("......")
print("Actual value: \n{}\n".format(outputs[i]))
ret = False ret = False
return ret return ret
...@@ -142,21 +217,34 @@ def main(): ...@@ -142,21 +217,34 @@ def main():
# get model full path # get model full path
model_name = get_model_name(test_loc) model_name = get_model_name(test_loc)
model_path_name = test_loc + '/' + model_name model_path_name = test_loc + '/' + model_name
# get param names
param_names = model_parameter_names(model_path_name)
# get output names
output_names = model_output_names(model_path_name)
# get test cases
cases = get_test_cases(test_loc)
sample_case = test_loc + '/' + cases[0]
param_shapes = get_input_shapes(sample_case, param_names)
for name, dims in param_shapes.items():
print("Input: {}, shape: {}".format(name, dims))
print()
# read and compile model # read and compile model
model = migraphx.parse_onnx(model_path_name) model = migraphx.parse_onnx(model_path_name, map_input_dims=param_shapes)
param_names = model.get_parameter_names()
output_shapes = model.get_output_shapes() output_shapes = model.get_output_shapes()
model.compile(migraphx.get_target(target)) model.compile(migraphx.get_target(target))
# get test cases # get test cases
cases = get_test_cases(test_loc)
case_num = len(cases) case_num = len(cases)
correct_num = 0 correct_num = 0
for case_name in cases: for case_name in cases:
io_folder = test_loc + '/' + case_name io_folder = test_loc + '/' + case_name
input_data = wrapup_inputs(io_folder, param_names) input_data = wrapup_inputs(io_folder, param_names)
gold_output_data = read_outputs(io_folder, len(output_shapes)) gold_outputs = read_outputs(io_folder, output_names)
# if input shape is different from model shape, reload and recompile # if input shape is different from model shape, reload and recompile
# model # model
...@@ -170,7 +258,7 @@ def main(): ...@@ -170,7 +258,7 @@ def main():
output_data = run_one_case(model, input_data) output_data = run_one_case(model, input_data)
# check output correctness # check output correctness
ret = check_correctness(gold_output_data, output_data) ret = check_correctness(gold_outputs, output_data)
if ret: if ret:
correct_num += 1 correct_num += 1
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment