Commit 70fe454f authored by Umang Yadav

Merge branch 'develop' into resnet50_partition

parents ea0b8059 f47e0b5b
[Binary ONNX protobuf test graphs for MeanVarianceNormalization (contents not human-readable): mvn_default_axes_fp16_test.onnx, mvn_default_axes_rank_too_small_test.onnx, mvn_default_axes_test.onnx, mvn_rank_2_fp16_test.onnx, mvn_rank_2_test.onnx]
......@@ -42,11 +42,14 @@
#include <migraphx/op/lrn.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/unknown.hpp>
#include <migraphx/env.hpp>
#include <migraphx/serialize.hpp>
#include "test.hpp"
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_ENABLE_CK_WORKAROUNDS);
migraphx::program optimize_onnx(const std::string& name, bool run_passes = false)
{
migraphx::onnx_options options;
......@@ -4501,6 +4504,66 @@ TEST_CASE(mean_integral_test)
EXPECT(p == prog);
}
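// Builds the expected decomposition of MeanVarianceNormalization over `axes`:
//   out = (x - E[x]) / (sqrt(E[x^2] - E[x]^2) + 1e-9)
// and checks that parsing `test_file` yields an equivalent program.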
void mvn_n_rank_test(std::vector<int64_t> axes,
std::vector<size_t> input_shape,
const std::string& test_file)
{
using migraphx::make_op;
migraphx::program p;
auto* mm = p.get_main_module();
auto data = mm->add_parameter("data", {migraphx::shape::float_type, std::move(input_shape)});
auto data_mean = mm->add_instruction(make_op("reduce_mean", {{"axes", axes}}), data);
auto data_mean_squared = add_common_op(*mm, make_op("mul"), {data_mean, data_mean});
auto data_squared = add_common_op(*mm, make_op("mul"), {data, data});
auto data_squared_mean =
mm->add_instruction(make_op("reduce_mean", {{"axes", axes}}), data_squared);
auto mean_sub = add_common_op(*mm, make_op("sub"), {data_squared_mean, data_mean_squared});
auto std = add_common_op(*mm, make_op("sqrt"), {mean_sub});
auto dividend = add_common_op(*mm, make_op("sub"), {data, data_mean});
auto epsilon = mm->add_literal({migraphx::shape::float_type, {1e-9}});
auto divisor = add_common_op(*mm, make_op("add"), {std, epsilon});
add_common_op(*mm, make_op("div"), {dividend, divisor});
auto prog = optimize_onnx(test_file);
EXPECT(p == prog);
}
TEST_CASE(mvn_default_axes_test)
{
mvn_n_rank_test({0, 2, 3}, {2, 2, 2, 2}, "mvn_default_axes_test.onnx");
}
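// With no axes attribute the parser falls back to the MVN default axes {0, 2, 3};
// the following cases verify that inputs whose rank cannot support those axes are
// rejected at parse time.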
TEST_CASE(mvn_default_axes_rank_too_small_test)
{
EXPECT(
test::throws([&] { migraphx::parse_onnx("mvn_default_axes_rank_too_small_test.onnx"); }));
}
TEST_CASE(mvn_default_axes_rank_too_big_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("mvn_default_axes_rank_too_big_test.onnx"); }));
}
TEST_CASE(mvn_rank_2_test) { mvn_n_rank_test({1}, {2, 2}, "mvn_rank_2_test.onnx"); }
TEST_CASE(mvn_rank_3_test) { mvn_n_rank_test({0, 1}, {2, 2, 2}, "mvn_rank_3_test.onnx"); }
TEST_CASE(mvn_axes_rank_too_small_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("mvn_axes_rank_too_small_test.onnx"); }));
}
TEST_CASE(mvn_axes_rank_too_big_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("mvn_axes_rank_too_big_test.onnx"); }));
}
TEST_CASE(min_test)
{
migraphx::program p;
......@@ -5480,6 +5543,31 @@ TEST_CASE(qlinearmatmul_2D_test)
EXPECT(p.sort() == prog.sort());
}
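// Builds the clip portion of the expected QuantizeLinear program. When
// MIGRAPHX_ENABLE_CK_WORKAROUNDS is set, the quantization bounds are materialized
// as full-size literals matching shape `s`; otherwise they are added as scalar
// literals and left to insert_common_op to broadcast.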
migraphx::instruction_ref insert_quantizelinear_clip(migraphx::module& m,
const migraphx::instruction_ref ins,
const migraphx::instruction_ref round,
const migraphx::shape s,
const int64_t min_quant,
const int64_t max_quant)
{
migraphx::instruction_ref min_arg;
migraphx::instruction_ref max_arg;
if(migraphx::enabled(MIGRAPHX_ENABLE_CK_WORKAROUNDS{}))
{
std::vector<int> min_data(s.elements(), min_quant);
std::vector<int> max_data(s.elements(), max_quant);
min_arg = m.add_literal(migraphx::literal(s, min_data));
max_arg = m.add_literal(migraphx::literal(s, max_data));
}
else
{
min_arg = m.add_literal(migraphx::literal{migraphx::shape{s.type()}, {min_quant}});
max_arg = m.add_literal(migraphx::literal{migraphx::shape{s.type()}, {max_quant}});
}
return migraphx::insert_common_op(m, ins, migraphx::make_op("clip"), {round, min_arg, max_arg});
}
TEST_CASE(quantizelinear_test)
{
migraphx::program p;
......@@ -5488,16 +5576,10 @@ TEST_CASE(quantizelinear_test)
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1}});
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {5}}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {0}});
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {255}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), round, min_mbcast, max_mbcast);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
auto clip = insert_quantizelinear_clip(*mm, div, round, s, 0, 255);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
......@@ -5519,16 +5601,10 @@ TEST_CASE(quantizelinear_int32_test)
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l0);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {0}});
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {255}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), round, min_mbcast, max_mbcast);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
auto clip = insert_quantizelinear_clip(*mm, div, round, s, 0, 255);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
......@@ -5555,15 +5631,9 @@ TEST_CASE(quantizelinear_zero_point_test)
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l2_mbcast);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_mbcast);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {-128}});
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {127}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_mbcast, max_mbcast);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_mbcast);
auto s = round->get_shape();
auto clip = insert_quantizelinear_clip(*mm, div, add, s, -128, 127);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int8_type)}}),
......@@ -5594,15 +5664,9 @@ migraphx::program make_quantizelinear_axis_prog()
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l2_bcast);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_bcast);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {-128}});
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {127}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_mbcast, max_mbcast);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_bcast);
auto s = round->get_shape();
auto clip = insert_quantizelinear_clip(*mm, div, add, s, -128, 127);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int8_type)}}),
......@@ -8031,11 +8095,6 @@ TEST_CASE(transpose_gather_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(trilu_neg_k_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("trilu_neg_k_test.onnx"); }));
}
TEST_CASE(undefined_test)
{
migraphx::program p;
......
[Binary ONNX protobuf test graphs for the Trilu operator, with graph/file names changed from trilu_* to triu_* (contents not human-readable): trilu_batch_diff_k_test → triu_batch_diff_k_test, trilu_neg_k_test → triu_neg_k_test, trilu_out_k_test → triu_out_k_test, trilu_row_one_test → triu_row_one_test, trilu_test → triu_test]
......@@ -1211,6 +1211,115 @@ TEST_CASE(mean_integral_test)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
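// Parses and compiles the given MVN ONNX file on the ref target, feeds it an
// iota-filled "data" tensor of the requested shape, and returns the flattened
// result for comparison against the gold values below.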
template <typename T = float>
std::vector<T> mvn_test(std::vector<size_t> data_lens, const std::string& test_file)
{
migraphx::program p = migraphx::parse_onnx(test_file);
p.compile(migraphx::make_target("ref"));
migraphx::shape data_shape(migraphx::shape::get_type<T>{}, std::move(data_lens));
std::vector<T> data(data_shape.elements());
std::iota(begin(data), end(data), 0);
migraphx::parameter_map pm;
pm["data"] = migraphx::argument(data_shape, data.data());
auto result = p.eval(pm).back();
std::vector<T> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
return result_vector;
}
TEST_CASE(mvn_default_axes_test)
{
auto result = mvn_test({2, 2, 2, 2}, "mvn_default_axes_test.onnx");
std::vector<float> gold{-1.32424438,
-1.08347268,
-0.84270097,
-0.60192927,
-1.32424438,
-1.08347268,
-0.84270097,
-0.60192927,
0.60192927,
0.84270097,
1.08347268,
1.32424438,
0.60192927,
0.84270097,
1.08347268,
1.32424438};
EXPECT(migraphx::verify::verify_rms_range(result, gold));
}
TEST_CASE(mvn_default_axes_fp16_test)
{
using migraphx::half;
auto result = mvn_test<half>({2, 2, 2, 2}, "mvn_default_axes_fp16_test.onnx");
std::vector<half> gold{half{-1.324},
half{-1.084},
half{-0.843},
half{-0.602},
half{-1.324},
half{-1.084},
half{-0.843},
half{-0.602},
half{0.602},
half{0.843},
half{1.084},
half{1.324},
half{0.602},
half{0.843},
half{1.084},
half{1.324}};
EXPECT(migraphx::verify::verify_rms_range(result, gold));
}
TEST_CASE(mvn_rank_2_test)
{
auto result = mvn_test({2, 2}, "mvn_rank_2_test.onnx");
std::vector<float> gold{-1, 1, -1, 1};
EXPECT(migraphx::verify::verify_rms_range(result, gold));
}
TEST_CASE(mvn_rank_2_fp16_test)
{
using migraphx::half;
auto result = mvn_test<migraphx::half>({2, 2}, "mvn_rank_2_fp16_test.onnx");
std::vector<migraphx::half> gold{half{-1}, half{1}, half{-1}, half{1}};
EXPECT(migraphx::verify::verify_rms_range(result, gold));
}
TEST_CASE(mvn_rank_3_test)
{
auto result = mvn_test({2, 2, 2}, "mvn_rank_3_test.onnx");
std::vector<float> gold{-1.34164079,
-1.34164079,
-0.4472136,
-0.4472136,
0.4472136,
0.4472136,
1.34164079,
1.34164079};
EXPECT(migraphx::verify::verify_rms_range(result, gold));
}
TEST_CASE(mvn_rank_3_fp16_test)
{
using migraphx::half;
auto result = mvn_test<half>({2, 2, 2}, "mvn_rank_3_fp16_test.onnx");
std::vector<half> gold{half{-1.342},
half{-1.342},
half{-0.4473},
half{-0.4473},
half{0.4473},
half{0.4473},
half{1.342},
half{1.342}};
EXPECT(migraphx::verify::verify_rms_range(result, gold));
}
TEST_CASE(mod_test)
{
migraphx::program p = migraphx::parse_onnx("mod_test.onnx");
......@@ -2124,9 +2233,10 @@ std::vector<float> gen_trilu_test(const migraphx::shape& s, const migraphx::prog
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
return result_vector;
}
TEST_CASE(trilu_test)
TEST_CASE(triu_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_test.onnx");
migraphx::program p = migraphx::parse_onnx("triu_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
......@@ -2135,9 +2245,9 @@ TEST_CASE(trilu_test)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_batch_diff_k_test)
TEST_CASE(triu_batch_diff_k_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_batch_diff_k_test.onnx");
migraphx::program p = migraphx::parse_onnx("triu_batch_diff_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {2, 2, 3}}, p);
......@@ -2146,9 +2256,42 @@ TEST_CASE(trilu_batch_diff_k_test)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_lower_test)
TEST_CASE(tril_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_lower_test.onnx");
migraphx::program p = migraphx::parse_onnx("tril_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold = {1, 0, 0, 0, 5, 6, 0, 0, 9, 10, 11, 0};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(tril_batch_diff_k_test)
{
migraphx::program p = migraphx::parse_onnx("tril_batch_diff_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {2, 2, 3}}, p);
std::vector<float> gold = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(triu_neg_k_test)
{
migraphx::program p = migraphx::parse_onnx("triu_neg_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold = {1, 2, 3, 4, 5, 6, 7, 8, 0, 10, 11, 12};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(tril_neg_k_test)
{
migraphx::program p = migraphx::parse_onnx("tril_neg_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
......@@ -2157,9 +2300,9 @@ TEST_CASE(trilu_lower_test)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_out_k_test)
TEST_CASE(triu_out_k_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_out_k_test.onnx");
migraphx::program p = migraphx::parse_onnx("triu_out_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
......@@ -2168,9 +2311,20 @@ TEST_CASE(trilu_out_k_test)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_row_one_test)
TEST_CASE(tril_out_k_test)
{
migraphx::program p = migraphx::parse_onnx("tril_out_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(triu_row_one_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_row_one_test.onnx");
migraphx::program p = migraphx::parse_onnx("triu_row_one_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {1, 4}}, p);
......@@ -2179,4 +2333,15 @@ TEST_CASE(trilu_row_one_test)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(tril_row_one_test)
{
migraphx::program p = migraphx::parse_onnx("tril_row_one_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {1, 4}}, p);
std::vector<float> gold = {1, 2, 0, 0};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -154,7 +154,6 @@ def disabled_tests_onnx_1_7_0(backend_test):
backend_test.exclude(r'test_maxunpool_export_without_output_shape_cpu')
backend_test.exclude(r'test_mod_mixed_sign_int32_cpu')
backend_test.exclude(r'test_mod_mixed_sign_int8_cpu')
backend_test.exclude(r'test_mvn_cpu')
backend_test.exclude(
r'test_negative_log_likelihood_loss_iinput_shape_is_NCd1_weight_ignore_index_cpu'
)
......@@ -591,9 +590,6 @@ def disabled_tests_onnx_1_9_0(backend_test):
backend_test.exclude(r'test_gru_batchwise_cpu')
backend_test.exclude(r'test_lstm_batchwise_cpu')
backend_test.exclude(r'test_simple_rnn_batchwise_cpu')
backend_test.exclude(r'test_tril_cpu')
backend_test.exclude(r'test_tril_one_row_neg_cpu')
backend_test.exclude(r'test_tril_square_cpu')
# from OnnxBackendPyTorchConvertedModelTest
backend_test.exclude(r'test_MaxPool1d_stride_padding_dilation_cpu')
backend_test.exclude(r'test_MaxPool2d_stride_padding_dilation_cpu')
......@@ -803,7 +799,6 @@ def disabled_tests_onnx_1_13_0(backend_test):
backend_test.exclude(r'test_group_normalization_example_cpu')
backend_test.exclude(r'test_group_normalization_example_expanded_cpu')
backend_test.exclude(r'test_mish_cpu')
backend_test.exclude(r'test_mvn_expanded_ver18_cpu')
backend_test.exclude(r'test_optional_get_element_optional_sequence_cpu')
backend_test.exclude(r'test_optional_get_element_optional_tensor_cpu')
backend_test.exclude(r'test_optional_get_element_tensor_cpu')
......