Commit 7d344568 authored by charlie

Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_dim_onnx_parser

parents 69e4955e e16faac2
@@ -16,8 +16,8 @@ argument miopen_quant_convolution::compute(context& ctx,
                                            const shape& output_shape,
                                            const std::vector<argument>& args) const
 {
-    auto x_desc = make_tensor(args[0].get_shape(), true);
-    auto w_desc = make_tensor(args[1].get_shape(), true);
+    auto x_desc = make_tensor(args[0].get_shape(), int8_x4_format);
+    auto w_desc = make_tensor(args[1].get_shape(), int8_x4_format);
     auto y_desc = make_tensor(output_shape);
     float alpha = 1;
@@ -49,8 +49,8 @@ shape miopen_quant_convolution::compile(context& ctx,
                                         std::vector<shape> inputs)
 {
     shape workspace_shape{};
-    auto x_desc = make_tensor(inputs[0], true);
-    auto w_desc = make_tensor(inputs[1], true);
+    auto x_desc = make_tensor(inputs[0], int8_x4_format);
+    auto w_desc = make_tensor(inputs[1], int8_x4_format);
     auto y_desc = make_tensor(output_shape);
     std::size_t workspace_size = 0;
@@ -62,8 +62,15 @@ shape miopen_quant_convolution::compile(context& ctx,
                        &workspace_size);
     workspace_shape = shape{shape::int8_type, {workspace_size}};
-    auto arg_vec4_x = to_gpu(generate_argument(pack_int8_shape(inputs[0])));
-    auto arg_vec4_w = to_gpu(generate_argument(pack_int8_shape(inputs[1])));
+    auto x_shape = inputs[0];
+    auto w_shape = inputs[1];
+    if(int8_x4_format)
+    {
+        x_shape = pack_int8_shape(x_shape);
+        w_shape = pack_int8_shape(w_shape);
+    }
+    auto arg_vec4_x = to_gpu(generate_argument(x_shape));
+    auto arg_vec4_w = to_gpu(generate_argument(w_shape));
     auto y = allocate_gpu(output_shape);
     auto workspace = allocate_gpu(workspace_shape);

@@ -3831,7 +3831,6 @@ TEST_CASE(reshape_non_standard_test)
     migraphx::program p;
     auto* mm = p.get_main_module();
     migraphx::op::reshape op;
-    std::vector<int64_t> reshape_dims{4, 3, 2};
     migraphx::shape s{migraphx::shape::float_type, {2, 3, 4}};
     auto x = mm->add_parameter("x", s);
     auto tran_x =

@@ -1173,12 +1173,6 @@ TEST_CASE(gru_forward_args)
         0.3852, -0.1170, -0.2937, 0.2979, -0.1357, 0.4257, 0.3884, -0.2916, 0.1071, 0.0934,
         0.3645, -0.4310, -0.3480, 0.0702, -0.1558};
-    migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
-    std::vector<float> bias_data{
-        0.0560, 0.0310, -0.1669, -0.0781, 0.1793, -0.1758, 0.3173, -0.1650, -0.3732, 0.2946,
-        -0.0912, 0.3118, 0.1391, 0.2755, 0.2695, -0.1059, -0.2357, 0.3629, -0.2534, -0.0494,
-        0.0556, 0.0881, -0.2592, -0.2213, 0.2310, -0.4044, 0.1801, 0.1438, 0.3108, -0.3607};
     migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
     std::vector<float> input{-0.8432,
                              -0.9887,
@@ -1199,9 +1193,6 @@ TEST_CASE(gru_forward_args)
                              -1.0536,
                              -0.2529};
-    migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
-    std::vector<float> ih_data{
-        -0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
     float clip = 0.0f;
     // 3 args
@@ -1242,6 +1233,11 @@ TEST_CASE(gru_forward_args)
     // 4 args (bias is used)
     {
+        std::vector<float> bias_data{
+            0.0560, 0.0310, -0.1669, -0.0781, 0.1793, -0.1758, 0.3173, -0.1650, -0.3732, 0.2946,
+            -0.0912, 0.3118, 0.1391, 0.2755, 0.2695, -0.1059, -0.2357, 0.3629, -0.2534, -0.0494,
+            0.0556, 0.0881, -0.2592, -0.2213, 0.2310, -0.4044, 0.1801, 0.1438, 0.3108, -0.3607};
+        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
         migraphx::program p;
         auto* mm = p.get_main_module();
         auto seq = mm->add_literal(migraphx::literal{in_shape, input});
@@ -1280,6 +1276,9 @@ TEST_CASE(gru_forward_args)
     // 4 args (ih is used)
     {
+        std::vector<float> ih_data{
+            -0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
+        migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
         migraphx::program p;
         auto* mm = p.get_main_module();
         auto seq = mm->add_literal(migraphx::literal{in_shape, input});
@@ -2210,15 +2209,6 @@ TEST_CASE(gru_bidirectional_args)
         0.4101, 0.2641, -0.4110, -0.1681, 0.3582, -0.2089, 0.0852, 0.0963, 0.3866, 0.1955,
         -0.2174, 0.1996, -0.2252, 0.1748, 0.1833, -0.3155, 0.2567, -0.4387, 0.3402, 0.0599};
-    migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
-    std::vector<float> bias_data{
-        -0.1582, -0.0826, 0.4008, 0.0118, 0.2511, 0.1900, -0.2838, 0.2549, -0.2484, 0.2363,
-        -0.4083, -0.0295, -0.1161, 0.1211, 0.2509, -0.1414, -0.2628, -0.2992, 0.1517, 0.1817,
-        -0.2783, 0.3183, -0.1629, -0.3108, -0.3418, 0.0411, 0.2203, 0.2187, -0.2990, -0.0416,
-        0.0209, -0.1024, 0.4443, -0.4420, -0.0330, -0.3591, -0.2990, 0.2167, 0.1395, 0.2317,
-        0.1318, 0.1909, -0.3615, 0.1953, -0.2582, -0.2217, 0.3723, 0.1458, 0.2630, -0.0377,
-        0.1754, 0.0800, -0.3964, -0.3247, 0.4219, -0.0900, 0.3553, 0.2614, -0.1298, -0.1124};
     migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
     std::vector<float> input{-0.8432,
                              -0.9887,
@@ -2239,11 +2229,6 @@ TEST_CASE(gru_bidirectional_args)
                              -1.0536,
                              -0.2529};
-    migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
-    std::vector<float> ih_data{-0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348,
-                               0.4178, 1.0175, 0.9212, -0.0468, 0.5691, -0.0882, 0.8340,
-                               0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
     float clip = 0.0f;
     // 3 args
@@ -2288,6 +2273,15 @@ TEST_CASE(gru_bidirectional_args)
     // 4 args (bias is used)
     {
+        std::vector<float> bias_data{
+            -0.1582, -0.0826, 0.4008, 0.0118, 0.2511, 0.1900, -0.2838, 0.2549, -0.2484,
+            0.2363, -0.4083, -0.0295, -0.1161, 0.1211, 0.2509, -0.1414, -0.2628, -0.2992,
+            0.1517, 0.1817, -0.2783, 0.3183, -0.1629, -0.3108, -0.3418, 0.0411, 0.2203,
+            0.2187, -0.2990, -0.0416, 0.0209, -0.1024, 0.4443, -0.4420, -0.0330, -0.3591,
+            -0.2990, 0.2167, 0.1395, 0.2317, 0.1318, 0.1909, -0.3615, 0.1953, -0.2582,
+            -0.2217, 0.3723, 0.1458, 0.2630, -0.0377, 0.1754, 0.0800, -0.3964, -0.3247,
+            0.4219, -0.0900, 0.3553, 0.2614, -0.1298, -0.1124};
+        migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
         migraphx::program p;
         auto* mm = p.get_main_module();
         auto seq = mm->add_literal(migraphx::literal{in_shape, input});
@@ -2330,6 +2324,10 @@ TEST_CASE(gru_bidirectional_args)
     // 4 args (ih is used)
     {
+        std::vector<float> ih_data{-0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348,
+                                   0.4178, 1.0175, 0.9212, -0.0468, 0.5691, -0.0882, 0.8340,
+                                   0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
+        migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
         migraphx::program p;
         auto* mm = p.get_main_module();
         auto seq = mm->add_literal(migraphx::literal{in_shape, input});
@@ -4186,7 +4184,6 @@ TEST_CASE(lstm_bidirectional_var_seq_lens)
         -0.83699064, 0.49162736, -0.8271, -0.5683, 0.4562,
         -1.2545, 1.2729, -0.4082, -0.4392, -0.9406,
         0.7794, 1.8194, -0.5811, 0.2166};
-    std::vector<int> sl_data{1, 2, 3};
     float clip = 0.0f;
     migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
@@ -4196,10 +4193,11 @@ TEST_CASE(lstm_bidirectional_var_seq_lens)
     migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
     migraphx::shape ic_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
     migraphx::shape pph_shape{migraphx::shape::float_type, {num_dirct, 3 * hidden_size}};
-    migraphx::shape sl_shape{migraphx::shape::int32_type, {batch_size}};
     // concatenation of hidden states as program output
     {
+        std::vector<int> sl_data{1, 2, 3};
+        migraphx::shape sl_shape{migraphx::shape::int32_type, {batch_size}};
         migraphx::program p;
         auto* mm = p.get_main_module();
         auto seq = mm->add_literal(migraphx::literal{in_shape, input_data});

#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/apply_alpha_beta.hpp>
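
// Registered verify_program test: checks a batched dot followed by an elementwise add,
// with run_verify comparing the results produced by each available target.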
struct gemm_add : verify_program<gemm_add>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape m1_shape{migraphx::shape::float_type, {1, 2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {1, 3, 4}};
        migraphx::shape m3_shape{migraphx::shape::float_type, {1, 2, 4}};
        auto l1  = mm->add_parameter("1", m1_shape);
        auto l2  = mm->add_parameter("2", m2_shape);
        auto l3  = mm->add_parameter("3", m3_shape);
        auto dot = mm->add_instruction(migraphx::make_op("dot"), l1, l2);
        mm->add_instruction(migraphx::make_op("add"), dot, l3);
        return p;
    }
};

#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/op/quant_convolution.hpp>
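
// Registered verify_program test for int8 quant_convolution with default attributes;
// judging by its name, it covers the int8x4-packed format path that the
// int8_x4_format flag above selects.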
struct quant_conv_int8x4_default : verify_program<quant_conv_int8x4_default>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape a_shape{migraphx::shape::int8_type, {16, 16, 4, 4}};
        auto pa = mm->add_parameter("a", a_shape);
        migraphx::shape c_shape{migraphx::shape::int8_type, {16, 16, 3, 3}};
        auto pc = mm->add_parameter("c", c_shape);
        mm->add_instruction(
            migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
            pa,
            pc);
        return p;
    }
};

@@ -129,7 +129,6 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) const
     auto_print::set_terminate_handler(name);
     if(migraphx::enabled(MIGRAPHX_DUMP_TEST{}))
         migraphx::save(p, name + ".mxr");
-    std::vector<std::pair<std::string, result_future>> results;
     std::vector<std::string> target_names;
     for(const auto& tname : migraphx::get_targets())
     {
@@ -145,6 +144,7 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) const
     }
     if(not target_names.empty())
     {
+        std::vector<std::pair<std::string, result_future>> results;
         migraphx::parameter_map m;
         for(auto&& x : p.get_parameter_shapes())
         {

@@ -12,7 +12,6 @@ struct test_conv_bias_clipped_relu : verify_program<test_conv_bias_clipped_relu>
     {
         migraphx::program p;
         auto* mm = p.get_main_module();
-        std::vector<size_t> input_lens{4, 3, 3, 3};
         auto input =
             mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
         auto weights =

@@ -27,7 +27,7 @@ elif [ "$#" -eq 1 ]; then
     PREFIX=$1
 fi
-echo "Dependencies are install at $PREFIX"
+echo "Dependencies are installed at $PREFIX"
 # Install deps with rbuild
 rbuild prepare -d $PREFIX -s develop
@@ -35,3 +35,5 @@ rbuild prepare -d $PREFIX -s develop
 # install onnx package for unit tests
 pip3 install onnx==1.8.1 numpy==1.18.5 typing==3.7.4 pytest==6.0.1 packaging==16.8
+# pin version of protobuf in Python for onnx runtime unit tests
+pip3 install protobuf==3.20.0