Commit ac04f3cc authored by Khalique Ahmed's avatar Khalique Ahmed
Browse files

manual_merge

parents d39c3343 d8011adf
trilu_batch_diff_k_test:i
triu_batch_diff_k_test:h

x
ky"Trilutrilu_batch_diff_k_test*
ky"Trilutriu_batch_diff_k_test*
:BkZ
x

......@@ -12,4 +12,4 @@



B
\ No newline at end of file
B
\ No newline at end of file
trilu_neg_k_test:c
triu_neg_k_test:b

x
ky"Trilutrilu_neg_k_test*:
ky"Trilutriu_neg_k_test*:
BkZ
x

......@@ -10,4 +10,4 @@
y


B
\ No newline at end of file
B
\ No newline at end of file
trilu_out_k_test:Z
triu_out_k_test:Y

x
ky"Trilutrilu_out_k_test*
ky"Trilutriu_out_k_test*
:BkZ
x

......@@ -10,4 +10,4 @@
y


B
\ No newline at end of file
B
\ No newline at end of file
trilu_row_one_test:\
triu_row_one_test:[

x
ky"Trilutrilu_row_one_test*
ky"Trilutriu_row_one_test*
:BkZ
x

......@@ -10,4 +10,4 @@
y


B
\ No newline at end of file
B
\ No newline at end of file

trilu_test:E
 triu_test:D
xy"Trilu
trilu_testZ
xy"Trilu triu_testZ
x


......@@ -10,4 +8,4 @@ trilu_testZ
y


B
\ No newline at end of file
B
\ No newline at end of file
......@@ -24,7 +24,6 @@
#include <iostream>
#include <vector>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/pass_manager.hpp>
......@@ -48,7 +47,7 @@ TEST_CASE(averagepool_notset_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {12};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(averagepool_nt_cip_test)
......@@ -66,7 +65,7 @@ TEST_CASE(averagepool_nt_cip_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {8.33333};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(batch_norm_flat_test)
......@@ -77,15 +76,15 @@ TEST_CASE(batch_norm_flat_test)
migraphx::shape x_shape{migraphx::shape::float_type, {10}};
migraphx::shape c_shape(migraphx::shape::float_type, {1});
std::vector<float> x_data = {1.6524342,
-0.51048076,
0.32543048,
2.4410043,
2.0833702,
0.44981122,
1.0044622,
-0.24006313,
-0.43065986,
0.07626268};
-0.51048076,
0.32543048,
2.4410043,
2.0833702,
0.44981122,
1.0044622,
-0.24006313,
-0.43065986,
0.07626268};
std::vector<float> scale_data = {-0.02927135};
std::vector<float> bias_data = {0.42347777};
std::vector<float> mean_data = {-0.00449735};
......@@ -112,7 +111,7 @@ TEST_CASE(batch_norm_flat_test)
0.43305403,
0.4408022,
0.42019472};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(batch_norm_rank_2_test)
......@@ -149,7 +148,7 @@ TEST_CASE(batch_norm_rank_2_test)
9.89948504,
9.89948504,
12.72790933};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(batch_norm_1d_test)
......@@ -185,7 +184,7 @@ TEST_CASE(batch_norm_1d_test)
0.4927, 0.771, -1.956, -2.123, -0.664, -0.583, -0.7207, -0.5127};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(batch_norm_2d_test)
......@@ -251,7 +250,7 @@ TEST_CASE(batch_norm_2d_test)
-2.76707697e+00, 1.47579327e+01, 4.94736385e+00, 2.68847847e+01, -6.49254417e+00,
1.94286156e+00, -7.19223642e+00, -3.70413971e+00, -4.04303551e-01, -1.01827660e+01,
1.49476433e+00};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(batch_norm_3d_test)
......@@ -293,7 +292,7 @@ TEST_CASE(batch_norm_3d_test)
6.098, 11.03, 2.81, 2.81, 2.81, 12.125, 3.143, 8.53, 17.52, 4.938, 15.71,
1.347, 4.938, 1.167, 6.098, 12.67, 12.67, 4.453, 4.453, -0.4768, 12.67};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(celu_verify_test)
......@@ -310,12 +309,12 @@ TEST_CASE(celu_verify_test)
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> correct(6);
std::vector<float> gold(6);
float alpha = 0.5;
std::transform(data.begin(), data.end(), correct.begin(), [&](auto x) {
std::transform(data.begin(), data.end(), gold.begin(), [&](auto x) {
return std::max(0.0f, x) + std::min(0.0f, alpha * std::expm1(x / alpha));
});
EXPECT(migraphx::verify::verify_range(result_vector, correct));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(clip_args_type_mismatch)
......@@ -331,7 +330,7 @@ TEST_CASE(clip_args_type_mismatch)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.5, 2, 2, 1.9, 2.5, 3, 2.9, 3.2, 3.7};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(depthtospace_simple_test)
......@@ -349,7 +348,7 @@ TEST_CASE(depthtospace_simple_test)
std::vector<float> gold = {0, 12, 1, 13, 2, 14, 24, 36, 25, 37, 26, 38, 3, 15, 4, 16,
5, 17, 27, 39, 28, 40, 29, 41, 6, 18, 7, 19, 8, 20, 30, 42,
31, 43, 32, 44, 9, 21, 10, 22, 11, 23, 33, 45, 34, 46, 35, 47};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(spacetodepth_simple_test)
......@@ -367,7 +366,7 @@ TEST_CASE(spacetodepth_simple_test)
std::vector<float> gold = {0, 2, 4, 12, 14, 16, 24, 26, 28, 36, 38, 40, 1, 3, 5, 13,
15, 17, 25, 27, 29, 37, 39, 41, 6, 8, 10, 18, 20, 22, 30, 32,
34, 42, 44, 46, 7, 9, 11, 19, 21, 23, 31, 33, 35, 43, 45, 47};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(spacetodepth_depthtospace_test)
......@@ -375,11 +374,11 @@ TEST_CASE(spacetodepth_depthtospace_test)
// space to depth
auto p1 = migraphx::parse_onnx("spacetodepth_simple_test.onnx");
p1.compile(migraphx::make_target("ref"));
std::vector<float> data_in(48);
std::iota(std::begin(data_in), std::end(data_in), 0);
std::vector<float> gold_data_in(48);
std::iota(std::begin(gold_data_in), std::end(gold_data_in), 0);
migraphx::shape s_x_1{migraphx::shape::float_type, {1, 2, 4, 6}};
migraphx::parameter_map pp1;
pp1["x"] = migraphx::argument(s_x_1, data_in.data());
pp1["x"] = migraphx::argument(s_x_1, gold_data_in.data());
auto result1 = p1.eval(pp1).back();
// depth to space
auto p2 = migraphx::parse_onnx("depthtospace_simple_test.onnx");
......@@ -389,7 +388,7 @@ TEST_CASE(spacetodepth_depthtospace_test)
auto result2 = p2.eval(pp2).back();
std::vector<float> result_vector2;
result2.visit([&](auto output) { result_vector2.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify::verify_range(result_vector2, data_in));
EXPECT(migraphx::verify::verify_rms_range(result_vector2, gold_data_in));
}
TEST_CASE(eyelike_verify_test)
......@@ -406,8 +405,8 @@ TEST_CASE(eyelike_verify_test)
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> eyelike_mat = {0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.};
EXPECT(migraphx::verify::verify_range(result_vector, eyelike_mat));
std::vector<float> gold_eyelike_mat = {0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold_eyelike_mat));
}
TEST_CASE(eyelike_verify_negk_test)
......@@ -424,8 +423,8 @@ TEST_CASE(eyelike_verify_negk_test)
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> eyelike_mat = {0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.};
EXPECT(migraphx::verify::verify_range(result_vector, eyelike_mat));
std::vector<float> gold_eyelike_mat = {0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold_eyelike_mat));
}
TEST_CASE(gather_elements)
......@@ -448,7 +447,7 @@ TEST_CASE(gather_elements)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-0.125, 0.5625, -0.9375, 0.25, 0.5625, 0.9375};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(gemm_test)
......@@ -492,7 +491,7 @@ TEST_CASE(gemm_test)
0.8098607, 1.2157929, 1.1010075, 1.0706307, 1.0429881, 1.1771785, 1.2362702,
0.8239243, 1.1112559, 0.9639262, 1.0813537, 0.8825792, 1.121141, 1.1885703,
1.2227502, 1.4568202, 1.1388762, 1.55058, 1.0958102, 1.4637487, 1.5756242};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(gemm_half_test)
......@@ -536,7 +535,71 @@ TEST_CASE(gemm_half_test)
2.143, 2.062, 1.921, 1.836, 2.203, 1.952, 1.055, 1.225, 1.418, 1.209, 1.155,
1.42, 1.234, 1.302, 1.593, 1.368, 1.289, 1.327, 1.451, 1.394};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
template <typename T = float>
std::vector<T> norm_test(const std::vector<size_t>& x_dims,
                         std::vector<T>& scale,
                         std::vector<T>& bias,
                         const std::string& onnx_file)
{
    // Helper for the *_norm tests: parse and compile the given normalization
    // model on the reference target, feed it x = 1, 2, 3, ... along with the
    // provided scale and bias vectors, and return the flattened output.
    migraphx::program p = migraphx::parse_onnx(onnx_file);
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s_x{migraphx::shape::get_type<T>{}, x_dims};
    migraphx::shape s_s{migraphx::shape::get_type<T>{}, {scale.size()}};
    // Size the bias shape from the bias vector itself. The previous code used
    // scale.size() here, which only worked because all callers happen to pass
    // equal-length scale and bias vectors.
    migraphx::shape s_b{migraphx::shape::get_type<T>{}, {bias.size()}};

    std::vector<T> x(s_x.elements());
    std::iota(std::begin(x), std::end(x), 1);

    migraphx::parameter_map pp;
    pp["x"]     = migraphx::argument(s_x, x.data());
    pp["scale"] = migraphx::argument(s_s, scale.data());
    pp["bias"]  = migraphx::argument(s_b, bias.data());

    auto result = p.eval(pp).back();
    std::vector<T> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    return result_vector;
}
TEST_CASE(group_norm_test)
{
    // GroupNormalization over a {1, 4, 2} float tensor, two channels per group.
    std::vector<float> scale{1.2, 0.8};
    std::vector<float> bias{0.5, 0.2};
    const auto out = norm_test<float>({1, 4, 2}, scale, bias, "group_norm_3d_test.onnx");
    const std::vector<float> expected = {-1.10996256,
                                         -0.0366542,
                                         1.0366542,
                                         2.10996256,
                                         -0.87330837,
                                         -0.15776947,
                                         0.55776947,
                                         1.27330837};
    EXPECT(migraphx::verify::verify_rms_range(out, expected));
}
TEST_CASE(group_norm_half_test)
{
    // Same GroupNormalization case as group_norm_test, in half precision.
    using migraphx::half;
    std::vector<half> scale{half{1.2}, half{0.8}};
    std::vector<half> bias{half{0.5}, half{0.2}};
    const auto out = norm_test<half>({1, 4, 2}, scale, bias, "group_norm_3d_half_test.onnx");
    const std::vector<half> expected = {half{-1.10996256},
                                        half{-0.0366542},
                                        half{1.0366542},
                                        half{2.10996256},
                                        half{-0.87330837},
                                        half{-0.15776947},
                                        half{0.55776947},
                                        half{1.27330837}};
    EXPECT(migraphx::verify::verify_rms_range(out, expected));
}
TEST_CASE(greaterorequal_test)
......@@ -557,7 +620,7 @@ TEST_CASE(greaterorequal_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0, 1.0, 0.0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(hardsigmoid_verify_test)
......@@ -581,7 +644,7 @@ TEST_CASE(hardsigmoid_verify_test)
std::transform(data.begin(), data.end(), gold.begin(), [&](auto x) {
return std::max(0.0f, std::min(x * alpha + beta, 1.0f));
});
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_else_test)
......@@ -603,7 +666,7 @@ TEST_CASE(if_else_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0866565, -0.371067, 0.017719, 0.0250614, 0.0612539, -0.744683};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_else_test_inlined)
......@@ -622,7 +685,7 @@ TEST_CASE(if_else_test_inlined)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0507132, -0.712328, 0.0105797, 0.04569, 0.0185013, -1.16472};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_then_test)
......@@ -645,7 +708,7 @@ TEST_CASE(if_then_test)
// onnx adds ones so result should be just + 1.0
std::vector<float> gold = {1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_then_test_inlined)
......@@ -664,7 +727,7 @@ TEST_CASE(if_then_test_inlined)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_literal_test)
......@@ -689,14 +752,14 @@ TEST_CASE(if_literal_test)
{
auto result_vector = run_prog(true);
std::vector<float> gold = {1, 2, 3, 4, 5};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// else branch
{
auto result_vector = run_prog(false);
std::vector<float> gold = {5, 4, 3, 2, 1};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
}
......@@ -727,7 +790,7 @@ TEST_CASE(if_then_else_multi_output_shapes_inlined_test)
std::vector<float> gold = {
1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375, 0.125, 1.50, -0.125, 0.250, -0.250, -1.125};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_then_else_multi_output_shapes_test)
......@@ -758,7 +821,7 @@ TEST_CASE(if_then_else_multi_output_shapes_test)
std::vector<float> gold = {
1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375, 0.125, 1.50, -0.125, 0.250, -0.250, -1.125};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_pl_test)
......@@ -790,14 +853,14 @@ TEST_CASE(if_pl_test)
{
auto result_vector = run_prog(true);
std::vector<float> gold = {2, 3, 4, 5, 6, 7};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// else branch
{
auto result_vector = run_prog(false);
std::vector<float> gold = {1, 2, 3, 4, 5, 6};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
}
......@@ -836,8 +899,8 @@ TEST_CASE(if_tuple_test)
auto results = run_prog(true);
std::vector<float> gold0(4, 2.0f);
std::vector<float> gold1(12, 4.0f);
EXPECT(migraphx::verify::verify_range(results.at(0), gold0));
EXPECT(migraphx::verify::verify_range(results.at(1), gold1));
EXPECT(migraphx::verify::verify_rms_range(results.at(0), gold0));
EXPECT(migraphx::verify::verify_rms_range(results.at(1), gold1));
}
// else branch
......@@ -845,8 +908,8 @@ TEST_CASE(if_tuple_test)
auto results = run_prog(false);
std::vector<float> gold0(4, 3.0f);
std::vector<float> gold1(12, 5.0f);
EXPECT(migraphx::verify::verify_range(results.at(0), gold0));
EXPECT(migraphx::verify::verify_range(results.at(1), gold1));
EXPECT(migraphx::verify::verify_rms_range(results.at(0), gold0));
EXPECT(migraphx::verify::verify_rms_range(results.at(1), gold1));
}
}
......@@ -877,7 +940,7 @@ TEST_CASE(instance_norm_test)
2.54919,
3.32379,
4.09838};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(instance_norm_dyn_batch_test)
......@@ -919,7 +982,7 @@ TEST_CASE(instance_norm_dyn_batch_test)
2.54919,
3.32379,
4.09838};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(instance_norm_3d_test)
......@@ -948,7 +1011,131 @@ TEST_CASE(instance_norm_3d_test)
3.18218,
4.05505};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(isinf_half_test)
{
    // IsInf over half-precision input: only the +/-infinity entries map to 1;
    // NaN, min, max, and ordinary values map to 0.
    using limits = std::numeric_limits<migraphx::half>;
    auto prog    = migraphx::parse_onnx("isinf_half_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    std::vector<migraphx::half> input = {-limits::infinity(),
                                         limits::quiet_NaN(),
                                         limits::min(),
                                         migraphx::half(3.6),
                                         limits::max(),
                                         limits::infinity()};
    migraphx::shape in_shape{migraphx::shape::half_type, {2, 3}};
    migraphx::parameter_map params;
    params["t1"] = migraphx::argument(in_shape, input.data());

    auto out = prog.eval(params).back();
    std::vector<float> out_vec;
    out.visit([&](auto v) { out_vec.assign(v.begin(), v.end()); });
    const std::vector<float> expected = {1, 0, 0, 0, 0, 1};
    EXPECT(migraphx::verify::verify_rms_range(out_vec, expected));
}
TEST_CASE(isinf_neg_test)
{
    // IsInf configured to detect only negative infinity: just the first
    // entry (-inf) reports 1; +inf and everything else report 0.
    using limits = std::numeric_limits<float>;
    auto prog    = migraphx::parse_onnx("isinf_neg_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    std::vector<float> input = {
        -limits::infinity(), limits::quiet_NaN(), limits::min(), 3.6, limits::max(),
        limits::infinity()};
    migraphx::shape in_shape{migraphx::shape::float_type, {2, 3}};
    migraphx::parameter_map params;
    params["t1"] = migraphx::argument(in_shape, input.data());

    auto out = prog.eval(params).back();
    std::vector<float> out_vec;
    out.visit([&](auto v) { out_vec.assign(v.begin(), v.end()); });
    const std::vector<float> expected = {1, 0, 0, 0, 0, 0};
    EXPECT(migraphx::verify::verify_rms_range(out_vec, expected));
}
TEST_CASE(isinf_double_pos_test)
{
    // IsInf configured to detect only positive infinity, on double input:
    // just the last entry (+inf) reports 1.
    using limits = std::numeric_limits<double>;
    auto prog    = migraphx::parse_onnx("isinf_double_pos_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    std::vector<double> input = {
        -limits::infinity(), limits::quiet_NaN(), limits::min(), 3.6, limits::max(),
        limits::infinity()};
    migraphx::shape in_shape{migraphx::shape::double_type, {2, 3}};
    migraphx::parameter_map params;
    params["t1"] = migraphx::argument(in_shape, input.data());

    auto out = prog.eval(params).back();
    std::vector<float> out_vec;
    out.visit([&](auto v) { out_vec.assign(v.begin(), v.end()); });
    const std::vector<float> expected = {0, 0, 0, 0, 0, 1};
    EXPECT(migraphx::verify::verify_rms_range(out_vec, expected));
}
TEST_CASE(isinf_no_detect_test)
{
    // IsInf with both detect_negative and detect_positive disabled: every
    // element, including the infinities, must report 0.
    migraphx::program p = migraphx::parse_onnx("isinf_no_detect_test.onnx");
    p.compile(migraphx::make_target("ref"));
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::parameter_map pp;
    float nan      = std::numeric_limits<float>::quiet_NaN();
    float infinity = std::numeric_limits<float>::infinity();
    float max      = std::numeric_limits<float>::max();
    float min      = std::numeric_limits<float>::min();
    // BUG FIX: this buffer was std::vector<double>, but it is bound to a
    // float_type shape below, so the doubles were reinterpreted as floats
    // and the model was fed garbage values. The element type must match the
    // shape's type.
    std::vector<float> data = {-infinity, nan, min, 3.6, max, infinity};
    pp["t1"]                = migraphx::argument(s, data.data());
    auto result             = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0, 0, 0, 0, 0, 0};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(layer_norm_test)
{
    // LayerNormalization over a {1, 4, 2} float tensor; each length-2 inner
    // vector normalizes to the same +/- pair, scaled and shifted.
    std::vector<float> scale{1.2, 0.8};
    std::vector<float> bias{0.5, 0.2};
    const auto out = norm_test<float>({1, 4, 2}, scale, bias, "layer_norm_3d_test.onnx");
    const std::vector<float> expected = {-0.69997597,
                                         0.99998398,
                                         -0.69997597,
                                         0.99998398,
                                         -0.69997597,
                                         0.99998398,
                                         -0.69997597,
                                         0.99998398};
    EXPECT(migraphx::verify::verify_rms_range(out, expected));
}
TEST_CASE(layer_norm_half_test)
{
    // Same LayerNormalization case as layer_norm_test, in half precision.
    using migraphx::half;
    std::vector<half> scale{half{1.2}, half{0.8}};
    std::vector<half> bias{half{0.5}, half{0.2}};
    const auto out = norm_test<half>({1, 4, 2}, scale, bias, "layer_norm_3d_half_test.onnx");
    const std::vector<half> expected = {half{-0.69997597},
                                        half{0.99998398},
                                        half{-0.69997597},
                                        half{0.99998398},
                                        half{-0.69997597},
                                        half{0.99998398},
                                        half{-0.69997597},
                                        half{0.99998398}};
    EXPECT(migraphx::verify::verify_rms_range(out, expected));
}
TEST_CASE(lessorequal_test)
......@@ -969,7 +1156,7 @@ TEST_CASE(lessorequal_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1, 0, 1};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(lpnormalization_1norm)
......@@ -997,7 +1184,7 @@ TEST_CASE(lpnormalization_1norm)
3.f / 7.f,
0.f,
0.f};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(lpnormalization_2norm)
......@@ -1013,19 +1200,19 @@ TEST_CASE(lpnormalization_2norm)
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> correct{0.f,
2.f / 3.f,
-2.f / 3.f,
1.f / 3.f,
1.f / 6.f,
-5.f / 6.f,
3.f / 6.f,
-1.f / 6.f,
-4.f / 5.f,
3.f / 5.f,
0.f,
0.f};
EXPECT(migraphx::verify::verify_range(result_vector, correct));
std::vector<float> gold{0.f,
2.f / 3.f,
-2.f / 3.f,
1.f / 3.f,
1.f / 6.f,
-5.f / 6.f,
3.f / 6.f,
-1.f / 6.f,
-4.f / 5.f,
3.f / 5.f,
0.f,
0.f};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mean_broadcast_test)
......@@ -1056,7 +1243,7 @@ TEST_CASE(mean_broadcast_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold(24, 3);
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mean_test)
......@@ -1083,7 +1270,7 @@ TEST_CASE(mean_test)
const auto mean = std::accumulate(scalars.begin(), scalars.end(), 0.0) / num_data;
std::vector<double> gold(num_elms, mean);
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mean_integral_test)
......@@ -1110,7 +1297,116 @@ TEST_CASE(mean_integral_test)
const auto mean = std::accumulate(scalars.begin(), scalars.end(), 0) / num_data;
std::vector<int> gold(num_elms, mean);
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
template <typename T = float>
std::vector<T> mvn_test(std::vector<size_t> data_lens, const std::string& test_file)
{
migraphx::program p = migraphx::parse_onnx(test_file);
p.compile(migraphx::make_target("ref"));
migraphx::shape data_shape(migraphx::shape::get_type<T>{}, std::move(data_lens));
std::vector<T> data(data_shape.elements());
std::iota(begin(data), end(data), 0);
migraphx::parameter_map pm;
pm["data"] = migraphx::argument(data_shape, data.data());
auto result = p.eval(pm).back();
std::vector<T> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
return result_vector;
}
TEST_CASE(mvn_default_axes_test)
{
    // MVN with default axes over a {2, 2, 2, 2} tensor of 0..15.
    const auto out = mvn_test({2, 2, 2, 2}, "mvn_default_axes_test.onnx");
    const std::vector<float> expected{-1.32424438,
                                      -1.08347268,
                                      -0.84270097,
                                      -0.60192927,
                                      -1.32424438,
                                      -1.08347268,
                                      -0.84270097,
                                      -0.60192927,
                                      0.60192927,
                                      0.84270097,
                                      1.08347268,
                                      1.32424438,
                                      0.60192927,
                                      0.84270097,
                                      1.08347268,
                                      1.32424438};
    EXPECT(migraphx::verify::verify_rms_range(out, expected));
}
TEST_CASE(mvn_default_axes_fp16_test)
{
    // Half-precision variant of mvn_default_axes_test.
    using migraphx::half;
    const auto out = mvn_test<half>({2, 2, 2, 2}, "mvn_default_axes_fp16_test.onnx");
    const std::vector<half> expected{half{-1.324},
                                     half{-1.084},
                                     half{-0.843},
                                     half{-0.602},
                                     half{-1.324},
                                     half{-1.084},
                                     half{-0.843},
                                     half{-0.602},
                                     half{0.602},
                                     half{0.843},
                                     half{1.084},
                                     half{1.324},
                                     half{0.602},
                                     half{0.843},
                                     half{1.084},
                                     half{1.324}};
    EXPECT(migraphx::verify::verify_rms_range(out, expected));
}
TEST_CASE(mvn_rank_2_test)
{
    // On a 2x2 input of 0..3, MVN maps each normalized pair to {-1, 1}.
    const auto out = mvn_test({2, 2}, "mvn_rank_2_test.onnx");
    const std::vector<float> expected{-1, 1, -1, 1};
    EXPECT(migraphx::verify::verify_rms_range(out, expected));
}
TEST_CASE(mvn_rank_2_fp16_test)
{
    // Half-precision variant of mvn_rank_2_test.
    using migraphx::half;
    const auto out = mvn_test<half>({2, 2}, "mvn_rank_2_fp16_test.onnx");
    const std::vector<half> expected{half{-1}, half{1}, half{-1}, half{1}};
    EXPECT(migraphx::verify::verify_rms_range(out, expected));
}
TEST_CASE(mvn_rank_3_test)
{
    // MVN over a {2, 2, 2} tensor of 0..7.
    const auto out = mvn_test({2, 2, 2}, "mvn_rank_3_test.onnx");
    const std::vector<float> expected{-1.34164079,
                                      -1.34164079,
                                      -0.4472136,
                                      -0.4472136,
                                      0.4472136,
                                      0.4472136,
                                      1.34164079,
                                      1.34164079};
    EXPECT(migraphx::verify::verify_rms_range(out, expected));
}
TEST_CASE(mvn_rank_3_fp16_test)
{
    // Half-precision variant of mvn_rank_3_test.
    using migraphx::half;
    const auto out = mvn_test<half>({2, 2, 2}, "mvn_rank_3_fp16_test.onnx");
    const std::vector<half> expected{half{-1.342},
                                     half{-1.342},
                                     half{-0.4473},
                                     half{-0.4473},
                                     half{0.4473},
                                     half{0.4473},
                                     half{1.342},
                                     half{1.342}};
    EXPECT(migraphx::verify::verify_rms_range(out, expected));
}
TEST_CASE(mod_test)
......@@ -1137,7 +1433,7 @@ TEST_CASE(mod_test)
std::vector<int32_t> gold = {0, -2, 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2,
5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, 5};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mod_test_different_types)
......@@ -1165,7 +1461,7 @@ TEST_CASE(mod_test_different_types)
std::vector<int32_t> gold = {0, -2, 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2,
5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, 5};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mod_test_fmod)
......@@ -1194,7 +1490,7 @@ TEST_CASE(mod_test_fmod)
10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 1.6, 3.9, 5.2,
7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.2, 1.3, 3.1};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mod_test_fmod_different_types)
......@@ -1224,7 +1520,78 @@ TEST_CASE(mod_test_fmod_different_types)
10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 1.6, 3.9, 5.2,
7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.2, 1.3, 3.1};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(multinomial_dyn_test)
{
    // Statistical test of the Multinomial op with a dynamic batch dimension:
    // draw many samples from two 5-category distributions and check that the
    // empirical category frequencies match the input distributions.
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4};
    auto p = migraphx::parse_onnx("multinomial_dyn_test.onnx", options);

    const size_t batch_size(2);
    const size_t categories(5);
    const size_t sample_size(100000);
    p.compile(migraphx::make_target("ref"));

    // Distribution function (2 distributions of 5 categories each)
    std::vector<int> dist{15, 25, 15, 25, 20, 20, 20, 10, 25, 25};
    EXPECT(dist.size() == categories * batch_size);
    // Multinomial takes unnormalized log-probabilities as input.
    std::vector<float> data(categories * batch_size);
    std::transform(dist.begin(), dist.end(), data.begin(), [&](auto d) { return log(d); });

    // Shape of the probability distribution, which also defines the number of categories
    migraphx::shape s{migraphx::shape::float_type, {batch_size, categories}};
    migraphx::parameter_map pp;
    pp["input"] = migraphx::argument(s, data.data());
    auto result = p.eval(pp).back();
    std::vector<int32_t> result_vec(batch_size * sample_size);
    result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });

    // Make a categorical histogram of output
    // for first result in batch (first half of result_vec)
    std::vector<int> res_dist(categories, 0);
    size_t r = 0;
    for(r = 0; r < result_vec.size() / 2; r++)
        res_dist[result_vec[r]]++;

    // normalizing factors for original and measured distributions
    auto dist_sum     = std::accumulate(dist.begin(), dist.begin() + 5, 0);
    auto res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);

    // Values approximate the distribution in dist
    std::vector<float> norm(5);
    std::vector<float> res_norm(5);

    std::transform(dist.begin(), dist.begin() + 5, norm.begin(), [&](auto n) {
        return static_cast<double>(n) / dist_sum;
    });
    std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) {
        return static_cast<double>(n) / res_dist_sum;
    });

    // 1% absolute tolerance on the first batch's empirical frequencies.
    EXPECT(migraphx::verify::verify_range_with_tolerance(
        norm, migraphx::verify::expected{res_norm}, migraphx::verify::tolerance{0.01}));

    // Make a categorical histogram of output
    // for second result in batch (r continues from the midpoint)
    std::fill(res_dist.begin(), res_dist.end(), 0);
    for(; r < result_vec.size(); r++)
        res_dist[result_vec[r]]++;

    // Re-derive the normalizing factors for the second distribution.
    dist_sum     = std::accumulate(dist.begin() + 5, dist.end(), 0);
    res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
    std::transform(dist.begin() + 5, dist.end(), norm.begin(), [&](auto n) {
        return static_cast<double>(n) / dist_sum;
    });
    std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) {
        return static_cast<double>(n) / res_dist_sum;
    });
    EXPECT(migraphx::verify::verify_range_with_tolerance(
        res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
}
TEST_CASE(nonzero_test)
......@@ -1243,7 +1610,289 @@ TEST_CASE(nonzero_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 0, 1, 0, 0, 1, 0, 0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearadd_test)
{
    // github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.QLinearAdd
    migraphx::program p = migraphx::parse_onnx("qlinearadd_test.onnx");
    p.compile(migraphx::make_target("ref"));

    constexpr size_t n = 64;
    // A counts up from 0 by 2 and B counts down from 128 by 2, so every
    // element pair has the same sum and the quantized result is constant.
    std::vector<uint8_t> data_a(n);
    std::vector<uint8_t> data_b(n);
    for(size_t i = 0; i < n; ++i)
    {
        data_a[i] = static_cast<uint8_t>(2 * i);
        data_b[i] = static_cast<uint8_t>(128 - 2 * i);
    }

    migraphx::shape a{migraphx::shape::uint8_type, {n}};
    migraphx::shape b{migraphx::shape::uint8_type, {n}};
    migraphx::parameter_map pp;
    pp["A"] = migraphx::argument(a, data_a.data());
    pp["B"] = migraphx::argument(b, data_b.data());

    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    std::vector<uint8_t> gold(n, 64);
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearadd_bcast_test)
{
    // github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.QLinearAdd
    migraphx::program p = migraphx::parse_onnx("qlinearadd_bcast_test.onnx");
    p.compile(migraphx::make_target("ref"));

    constexpr size_t n = 64;
    // A counts up from -64 by 2 and B counts down from 96 by 2; B has a
    // broadcastable {1, 1, 64} shape. Every pair sums to the same value, so
    // the quantized output is constant.
    std::vector<int8_t> data_a(n);
    std::vector<int8_t> data_b(n);
    for(size_t i = 0; i < n; ++i)
    {
        data_a[i] = static_cast<int8_t>(-64 + 2 * static_cast<int>(i));
        data_b[i] = static_cast<int8_t>(96 - 2 * static_cast<int>(i));
    }

    migraphx::shape a{migraphx::shape::int8_type, {n}};
    migraphx::shape b{migraphx::shape::int8_type, {1, 1, n}};
    migraphx::parameter_map pp;
    pp["A"] = migraphx::argument(a, data_a.data());
    pp["B"] = migraphx::argument(b, data_b.data());

    auto result = p.eval(pp).back();
    std::vector<int8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    std::vector<int8_t> gold(n, -64);
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// QLinearConv on a 1x1x7x7 uint8 input; weights, scales and zero-points are
// baked into the .onnx model (not visible here). Gold values taken from the
// ONNX reference example linked below.
TEST_CASE(qlinearconv_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_test.onnx");
    p.compile(migraphx::make_target("ref"));
    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 7, 7}};
    std::vector<uint8_t> x_data = {255, 174, 162, 25,  203, 168, 58,  15,  59,  237, 95,  129, 0,
                                   64,  56,  242, 153, 221, 168, 12,  166, 232, 178, 186, 195, 237,
                                   162, 237, 188, 39,  124, 77,  80,  102, 43,  127, 230, 21,  83,
                                   41,  40,  134, 255, 154, 92,  141, 42,  148, 247};
    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());
    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    // Output is also 1x1x7x7 (49 elements)
    std::vector<uint8_t> gold = {0,   81,  93,  230, 52,  87,  197, 240, 196, 18,  160, 126, 255,
                                 191, 199, 13,  102, 34,  87,  243, 89,  23,  77,  69,  60,  18,
                                 93,  18,  67,  216, 131, 178, 175, 153, 212, 128, 25,  234, 172,
                                 214, 215, 121, 0,   101, 163, 114, 213, 107, 8};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// QLinearConv without padding: 1x1x5x5 uint8 input produces a 1x1x3x3 int8
// output (note the signed output type, set by the model's zero-point).
TEST_CASE(qlinearconv_pad_0_test)
{
    // https:xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_pad_0_test.onnx");
    p.compile(migraphx::make_target("ref"));
    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
    std::vector<uint8_t> x_data = {0,   11,  21,  32,  42,  53,  64,  74,  85,  96,  106, 117, 128,
                                   138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};
    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());
    auto result = p.eval(pp).back();
    std::vector<int8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    // # (1, 1, 3, 3) output tensor
    std::vector<int8_t> gold = {-43, -29, -15, 28, 42, 56, 99, 113, 127};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// QLinearConv with padding: 1x1x5x5 uint8 input keeps its spatial size,
// producing a 1x1x5x5 uint8 output. Weights/quant params come from the model.
TEST_CASE(qlinearconv_pad_1_test)
{
    // https:xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_pad_1_test.onnx");
    p.compile(migraphx::make_target("ref"));
    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
    std::vector<uint8_t> x_data = {0,   11,  21,  32,  42,  53,  64,  74,  85,  96,  106, 117, 128,
                                   138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};
    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());
    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    // # (1, 1, 5, 5) output tensor
    std::vector<uint8_t> gold = {19,  33,  43,  52,  38,  52,  85,  99,  113, 80,  99,  156, 170,
                                 184, 128, 146, 227, 241, 255, 175, 113, 175, 184, 194, 132};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// QLinearConv with a 1-D (per-channel) scale: two output channels, each the
// same 3x3 result, giving a 1x2x3x3 int8 output.
TEST_CASE(qlinearconv_scale_1D_test)
{
    // https:xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_scale_1D_test.onnx");
    p.compile(migraphx::make_target("ref"));
    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
    std::vector<uint8_t> x_data = {0,   11,  21,  32,  42,  53,  64,  74,  85,  96,  106, 117, 128,
                                   138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};
    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());
    auto result = p.eval(pp).back();
    std::vector<int8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    // # (1, 2, 3, 3) output tensor: both channels produce identical values
    std::vector<int8_t> gold = {
        -43, -29, -15, 28, 42, 56, 99, 113, 127, -43, -29, -15, 28, 42, 56, 99, 113, 127};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// QLinearGlobalAveragePool (ONNX Runtime contrib op): averages each 4x4
// channel plane of a 1x3x4x4 uint8 input. Channel data is chosen so each
// per-channel mean quantizes to 64 under the model's parameters.
TEST_CASE(qlinearglobalavgpool_test)
{
    // github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md
    // #com.microsoft.QLinearGlobalAveragePool
    migraphx::program p = migraphx::parse_onnx("qlinearglobalavgpool_test.onnx");
    p.compile(migraphx::make_target("ref"));
    migraphx::shape sh_x{migraphx::shape::uint8_type, {1, 3, 4, 4}};
    std::vector<uint8_t> data_x = {160, 156, 152, 148, 144, 140, 136, 132, 124, 120, 116, 112,
                                   108, 104, 100, 96,  64,  72,  80,  88,  96,  104, 112, 120,
                                   136, 144, 152, 160, 168, 176, 184, 192, 120, 121, 122, 123,
                                   124, 125, 126, 127, 129, 130, 131, 132, 133, 134, 135, 136};
    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sh_x, data_x.data());
    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    // One averaged value per channel
    std::vector<uint8_t> gold = {64, 64, 64};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// QLinearMatMul on two 1-D uint8 operands: a length-8 quantized dot product.
// Quantization parameters are defined inside the .onnx model (not visible here).
TEST_CASE(qlinearmatmul_1D_test)
{
    auto prog = migraphx::parse_onnx("qlinearmatmul_1D_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    std::vector<uint8_t> lhs = {2, 4, 6, 8, 10, 12, 14, 16};
    std::vector<uint8_t> rhs = {126, 130, 124, 132, 122, 134, 120, 136};
    migraphx::shape lhs_shape{migraphx::shape::uint8_type, {8}};
    migraphx::shape rhs_shape{migraphx::shape::uint8_type, {8}};

    migraphx::parameter_map params;
    params["A"] = migraphx::argument(lhs_shape, lhs.data());
    params["B"] = migraphx::argument(rhs_shape, rhs.data());

    auto out = prog.eval(params).back();
    std::vector<uint8_t> actual;
    out.visit([&](auto v) { actual.assign(v.begin(), v.end()); });

    // Single scalar result
    const std::vector<uint8_t> expected = {66};
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
// QLinearMatMul on 2-D operands: {1,8} x {8,1} — the same dot product as the
// 1-D case, expressed as a matrix multiply. Quant params come from the model.
TEST_CASE(qlinearmatmul_2D_test)
{
    auto prog = migraphx::parse_onnx("qlinearmatmul_2D_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    std::vector<uint8_t> lhs = {2, 4, 6, 8, 10, 12, 14, 16};
    std::vector<uint8_t> rhs = {126, 130, 124, 132, 122, 134, 120, 136};
    migraphx::shape lhs_shape{migraphx::shape::uint8_type, {1, 8}};
    migraphx::shape rhs_shape{migraphx::shape::uint8_type, {8, 1}};

    migraphx::parameter_map params;
    params["A"] = migraphx::argument(lhs_shape, lhs.data());
    params["B"] = migraphx::argument(rhs_shape, rhs.data());

    auto out = prog.eval(params).back();
    std::vector<uint8_t> actual;
    out.visit([&](auto v) { actual.assign(v.begin(), v.end()); });

    // {1,1} result holding a single value
    const std::vector<uint8_t> expected = {66};
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
// QLinearMatMul on batched 3-D operands ({2,2,4} x {2,4,3}); both batches hold
// identical data, so the gold output repeats. Values follow the ONNX reference
// example linked below.
TEST_CASE(qlinearmatmul_3D_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearMatMul.html
    migraphx::program p = migraphx::parse_onnx("qlinearmatmul_3D_test.onnx");
    p.compile(migraphx::make_target("ref"));
    migraphx::shape a{migraphx::shape::uint8_type, {2, 2, 4}};
    std::vector<uint8_t> data_a = {
        208, 236, 0, 238, 3, 214, 255, 29, 208, 236, 0, 238, 3, 214, 255, 29};
    migraphx::shape b{migraphx::shape::uint8_type, {2, 4, 3}};
    std::vector<uint8_t> data_b = {152, 51, 244, 60, 26, 255, 0, 127, 246, 127, 254, 247,
                                   152, 51, 244, 60, 26, 255, 0, 127, 246, 127, 254, 247};
    migraphx::parameter_map pp;
    pp["A"] = migraphx::argument(a, data_a.data());
    pp["B"] = migraphx::argument(b, data_b.data());
    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    // {2,2,3} output; second batch repeats the first
    std::vector<uint8_t> gold = {168, 115, 255, 1, 66, 151, 168, 115, 255, 1, 66, 151};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(resize_downsample_f_test)
......@@ -1264,7 +1913,7 @@ TEST_CASE(resize_downsample_f_test)
std::vector<float> gold = {0.0f, 3.0f};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(resize_upsample_linear_ac_test)
......@@ -1299,7 +1948,7 @@ TEST_CASE(resize_upsample_linear_ac_test)
11.0f / 3,
4};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(resize_upsample_linear_test)
......@@ -1320,7 +1969,7 @@ TEST_CASE(resize_upsample_linear_test)
std::vector<float> gold = {
1, 1.25, 1.75, 2, 1.5, 1.75, 2.25, 2.5, 2.5, 2.75, 3.25, 3.5, 3, 3.25, 3.75, 4};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(resize_upsample_pf_test)
......@@ -1341,7 +1990,7 @@ TEST_CASE(resize_upsample_pf_test)
std::vector<float> gold = {1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2,
3, 3, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(reversesequence_4D_verify_test)
......@@ -1362,7 +2011,7 @@ TEST_CASE(reversesequence_4D_verify_test)
std::vector<float> gold = {
8.0, 9.0, 10.0, 11.0, 4.0, 5.0, 6.0, 7.0, 0.0, 1.0, 2.0, 3.0, 12.0, 13.0, 14.0, 15.0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(reversesequence_batch_verify_test)
......@@ -1383,7 +2032,7 @@ TEST_CASE(reversesequence_batch_verify_test)
std::vector<float> gold = {
0.0, 1.0, 2.0, 3.0, 5.0, 4.0, 6.0, 7.0, 10.0, 9.0, 8.0, 11.0, 15.0, 14.0, 13.0, 12.0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(reversesequence_time_verify_test)
......@@ -1404,7 +2053,44 @@ TEST_CASE(reversesequence_time_verify_test)
std::vector<float> gold = {
3.0, 6.0, 9.0, 12.0, 2.0, 5.0, 8.0, 13.0, 1.0, 4.0, 10.0, 14.0, 0.0, 7.0, 11.0, 15.0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// Round on half-precision input. The gold values show round-half-to-even
// ("banker's rounding") at ties: -3.5 -> -4, -2.5 -> -2, 0.5 -> 0, 2.5 -> 2,
// 3.5 -> 4, 4.5 -> 4.
TEST_CASE(round_half_test)
{
    migraphx::program p = migraphx::parse_onnx("round_half_test.onnx");
    p.compile(migraphx::make_target("ref"));
    migraphx::shape xs{migraphx::shape::half_type, {4, 4}};
    // Build half data via a float staging vector (no half literals available)
    std::vector<float> tmp = {-3.51,
                              -3.5,
                              -3.49,
                              -2.51,
                              -2.50,
                              -2.49,
                              -1.6,
                              -1.5,
                              -0.51,
                              -0.5,
                              0.5,
                              0.6,
                              2.4,
                              2.5,
                              3.5,
                              4.5};
    std::vector<migraphx::half> data{tmp.cbegin(), tmp.cend()};
    migraphx::parameter_map param_map;
    param_map["x"] = migraphx::argument(xs, data.data());
    auto result = p.eval(param_map).back();
    std::vector<migraphx::half> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    // Reuse the staging vector for the expected values
    tmp = {-4.0, -4.0, -3.0, -3.0, -2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 0.0, 1.0, 2.0, 2.0, 4.0, 4.0};
    std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(selu_test)
......@@ -1424,7 +2110,113 @@ TEST_CASE(selu_test)
std::vector<float> gold = {0.55, 1.05, 0, -0.10912, -0.149251, 6};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// Hard Shrink: small-magnitude values are zeroed, the rest pass through
// unchanged (threshold/bias come from the .onnx model, not visible here).
TEST_CASE(shrink_hard_test)
{
    auto prog = migraphx::parse_onnx("shrink_hard_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    std::vector<float> input{-2, -1, 0, 1, 2};
    migraphx::shape in_shape{migraphx::shape::float_type, {5}};
    migraphx::parameter_map params;
    params["x"] = migraphx::argument(in_shape, input.data());

    auto out = prog.eval(params).back();
    std::vector<float> actual;
    out.visit([&](auto v) { actual.assign(v.begin(), v.end()); });

    const std::vector<float> expected = {-2, 0, 0, 0, 2};
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
// Soft Shrink: values inside the threshold band are zeroed and the rest are
// shifted toward zero (lambd/bias come from the .onnx model).
TEST_CASE(shrink_soft_test)
{
    auto prog = migraphx::parse_onnx("shrink_soft_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    std::vector<float> input{-2, -1, 0, 1, 2};
    migraphx::shape in_shape{migraphx::shape::float_type, {5}};
    migraphx::parameter_map params;
    params["x"] = migraphx::argument(in_shape, input.data());

    auto out = prog.eval(params).back();
    std::vector<float> actual;
    out.visit([&](auto v) { actual.assign(v.begin(), v.end()); });

    const std::vector<float> expected = {-0.5, 0, 0, 0, 0.5};
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
// Shrink on half-precision data; attribute values (lambd/bias) are defined in
// the .onnx model and are not visible here.
TEST_CASE(shrink_verify_test)
{
    auto prog = migraphx::parse_onnx("shrink_verify_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    // Stage through float since half has no literals
    const std::vector<float> input_f = {-10.0, -5.0, 0.0, 5.0, 10.0};
    std::vector<migraphx::half> input(input_f.cbegin(), input_f.cend());
    migraphx::shape in_shape{migraphx::shape::half_type, {5}};
    migraphx::parameter_map params;
    params["x"] = migraphx::argument(in_shape, input.data());

    auto out = prog.eval(params).back();
    std::vector<migraphx::half> actual;
    out.visit([&](auto v) { actual.assign(v.begin(), v.end()); });

    const std::vector<float> expected_f = {-9.0, -4.0, 1.0, 4.0, 9.0};
    std::vector<migraphx::half> expected(expected_f.cbegin(), expected_f.cend());
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
// Second Shrink half-precision case with a different attribute set baked into
// the .onnx model (values not visible here).
TEST_CASE(shrink_verify2_test)
{
    auto prog = migraphx::parse_onnx("shrink_verify2_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    // Stage through float since half has no literals
    const std::vector<float> input_f = {-10.0, -5.0, 0.0, 5.0, 10.0};
    std::vector<migraphx::half> input(input_f.cbegin(), input_f.cend());
    migraphx::shape in_shape{migraphx::shape::half_type, {5}};
    migraphx::parameter_map params;
    params["x"] = migraphx::argument(in_shape, input.data());

    auto out = prog.eval(params).back();
    std::vector<migraphx::half> actual;
    out.visit([&](auto v) { actual.assign(v.begin(), v.end()); });

    const std::vector<float> expected_f = {-5.0, 0.0, 5.0, 10.0, 5.0};
    std::vector<migraphx::half> expected(expected_f.cbegin(), expected_f.cend());
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
// Shrink on int8 data: the middle band is zeroed and outer values are shifted
// toward zero (attributes come from the .onnx model).
TEST_CASE(shrink_int8_test)
{
    auto prog = migraphx::parse_onnx("shrink_int8_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    std::vector<int8_t> input{-4, -3, -2, -1, 0, 1, 2, 3, 4};
    migraphx::shape in_shape{migraphx::shape::int8_type, {3, 3}};
    migraphx::parameter_map params;
    params["x"] = migraphx::argument(in_shape, input.data());

    auto out = prog.eval(params).back();
    std::vector<int8_t> actual;
    out.visit([&](auto v) { actual.assign(v.begin(), v.end()); });

    const std::vector<int8_t> expected = {-2, -1, 0, 0, 0, 0, 0, 1, 2};
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
// Shrink on uint8 data: values at or below the model's threshold are zeroed,
// larger ones are shifted (attributes come from the .onnx model).
TEST_CASE(shrink_uint8_test)
{
    auto prog = migraphx::parse_onnx("shrink_uint8_test.onnx");
    prog.compile(migraphx::make_target("ref"));

    std::vector<uint8_t> input{1, 2, 3, 4, 5, 6, 7, 8, 9};
    migraphx::shape in_shape{migraphx::shape::uint8_type, {3, 3}};
    migraphx::parameter_map params;
    params["x"] = migraphx::argument(in_shape, input.data());

    auto out = prog.eval(params).back();
    std::vector<uint8_t> actual;
    out.visit([&](auto v) { actual.assign(v.begin(), v.end()); });

    const std::vector<uint8_t> expected = {0, 0, 0, 0, 0, 10, 11, 12, 13};
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
TEST_CASE(size_verify_test)
......@@ -1458,7 +2250,7 @@ TEST_CASE(slice_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {2, 3};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(slice_5arg_test)
......@@ -1478,7 +2270,7 @@ TEST_CASE(slice_5arg_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {10, 11, 12, 13, 15, 16, 17, 18};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(slice_reverse_test)
......@@ -1498,7 +2290,7 @@ TEST_CASE(slice_reverse_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {14, 13, 12, 11, 19, 18, 17, 16};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(slice_step_test)
......@@ -1518,7 +2310,7 @@ TEST_CASE(slice_step_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {14, 12};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(softplus_test)
......@@ -1539,7 +2331,7 @@ TEST_CASE(softplus_test)
std::transform(
data.begin(), data.end(), gold.begin(), [](auto x) { return std::log1p(std::exp(x)); });
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(softsign_test)
......@@ -1560,7 +2352,7 @@ TEST_CASE(softsign_test)
std::transform(
data.begin(), data.end(), gold.begin(), [](auto x) { return x / (1.0 + std::abs(x)); });
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(upsample_test)
......@@ -1579,7 +2371,7 @@ TEST_CASE(upsample_test)
std::vector<float> gold = {1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2,
3, 3, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(where_test)
......@@ -1621,7 +2413,7 @@ TEST_CASE(where_test)
2.0f,
1.0f,
2.0f};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
std::vector<float> gen_trilu_test(const migraphx::shape& s, const migraphx::program& p)
......@@ -1638,59 +2430,115 @@ std::vector<float> gen_trilu_test(const migraphx::shape& s, const migraphx::prog
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
return result_vector;
}
TEST_CASE(trilu_test)
TEST_CASE(triu_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_test.onnx");
migraphx::program p = migraphx::parse_onnx("triu_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold = {1, 2, 3, 4, 0, 6, 7, 8, 0, 0, 11, 12};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_batch_diff_k_test)
TEST_CASE(triu_batch_diff_k_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_batch_diff_k_test.onnx");
migraphx::program p = migraphx::parse_onnx("triu_batch_diff_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {2, 2, 3}}, p);
std::vector<float> gold = {0, 0, 3, 0, 0, 0, 0, 0, 9, 0, 0, 0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// tril on a 3x4 matrix: entries above the main diagonal are zeroed.
TEST_CASE(tril_test)
{
    auto prog = migraphx::parse_onnx("tril_test.onnx");
    auto actual = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, prog);
    const std::vector<float> expected = {1, 0, 0, 0, 5, 6, 0, 0, 9, 10, 11, 0};
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
// tril on a batched {2,2,3} input with a k offset (from the model) large
// enough that nothing is zeroed — output equals the input 1..12.
TEST_CASE(tril_batch_diff_k_test)
{
    auto prog = migraphx::parse_onnx("tril_batch_diff_k_test.onnx");
    auto actual = gen_trilu_test({migraphx::shape::float_type, {2, 2, 3}}, prog);
    const std::vector<float> expected = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
TEST_CASE(trilu_lower_test)
TEST_CASE(triu_neg_k_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_lower_test.onnx");
migraphx::program p = migraphx::parse_onnx("triu_neg_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold = {1, 2, 3, 4, 5, 6, 7, 8, 0, 10, 11, 12};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(tril_neg_k_test)
{
migraphx::program p = migraphx::parse_onnx("tril_neg_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold = {0, 0, 0, 0, 5, 0, 0, 0, 9, 10, 0, 0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_out_k_test)
TEST_CASE(triu_out_k_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_out_k_test.onnx");
migraphx::program p = migraphx::parse_onnx("triu_out_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold(12, 0);
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// tril with a diagonal offset outside the matrix: the whole input is kept.
TEST_CASE(tril_out_k_test)
{
    auto prog = migraphx::parse_onnx("tril_out_k_test.onnx");
    auto actual = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, prog);
    const std::vector<float> expected = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
TEST_CASE(trilu_row_one_test)
TEST_CASE(triu_row_one_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_row_one_test.onnx");
migraphx::program p = migraphx::parse_onnx("triu_row_one_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {1, 4}}, p);
std::vector<float> gold = {0, 2, 3, 4};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// tril on a single-row {1,4} input: only the leading entries survive.
TEST_CASE(tril_row_one_test)
{
    auto prog = migraphx::parse_onnx("tril_row_one_test.onnx");
    auto actual = gen_trilu_test({migraphx::shape::float_type, {1, 4}}, prog);
    const std::vector<float> expected = {1, 2, 0, 0};
    EXPECT(migraphx::verify::verify_rms_range(actual, expected));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -24,7 +24,8 @@
#include <migraphx/program.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/permutation.hpp>
#include <migraphx/op/common.hpp>
#include <sstream>
#include <migraphx/make_op.hpp>
......@@ -81,6 +82,63 @@ void throws_shape(const migraphx::shape&, Ts...)
"An expected shape should not be passed to throws_shape function");
}
// allocate with a static "shape" attribute computes exactly that shape.
TEST_CASE(allocate_static)
{
    migraphx::shape expected{migraphx::shape::float_type, {2, 3, 4}};
    expect_shape(expected, migraphx::make_op("allocate", {{"shape", to_value(expected)}}));
}
// allocate with a static "shape" attribute ignores the dims input for shape
// computation — the attribute wins.
TEST_CASE(allocate_static_input)
{
    migraphx::shape dims_input{migraphx::shape::int64_type, {3}};
    migraphx::shape expected{migraphx::shape::float_type, {2, 3, 4}};
    expect_shape(
        expected, migraphx::make_op("allocate", {{"shape", to_value(expected)}}), dims_input);
}
// allocate with only a "buf_type" attribute and a dims input: the result is a
// fully dynamic shape, one unbounded {0, max} dimension per input element.
TEST_CASE(allocate_dyn)
{
    migraphx::shape dims_input{migraphx::shape::int64_type, {2}};
    const auto unbounded = std::numeric_limits<std::size_t>::max();
    std::vector<migraphx::shape::dynamic_dimension> dyn_dims(
        2, migraphx::shape::dynamic_dimension{0, unbounded});
    expect_shape(migraphx::shape{migraphx::shape::float_type, dyn_dims},
                 migraphx::make_op("allocate", {{"buf_type", migraphx::shape::float_type}}),
                 dims_input);
}
// allocate with a dynamic "shape" attribute and a dims input: the attribute's
// dynamic shape is returned unchanged.
TEST_CASE(allocate_dyn_with_shape_attr)
{
    migraphx::shape dims_input{migraphx::shape::int64_type, {4}};
    migraphx::shape attr_shape{migraphx::shape::float_type,
                               {{1, 4}, {3, 3}, {4, 8, {4, 6}}, {4, 8}, {4, 6}}};
    expect_shape(attr_shape,
                 migraphx::make_op("allocate", {{"shape", migraphx::to_value(attr_shape)}}),
                 dims_input);
}
// allocate with a dynamic "shape" attribute and no inputs at all.
TEST_CASE(allocate_dyn_no_input)
{
    migraphx::shape attr_shape{migraphx::shape::float_type,
                               {{1, 4}, {3, 3}, {4, 8, {4, 6}}, {4, 8}, {4, 6}}};
    expect_shape(attr_shape,
                 migraphx::make_op("allocate", {{"shape", migraphx::to_value(attr_shape)}}));
}
// Supplying both "shape" and "buf_type" attributes is ambiguous and must throw.
TEST_CASE(allocate_shape_and_buf_type_error)
{
    migraphx::shape attr_shape{migraphx::shape::float_type,
                               {{1, 4}, {3, 3}, {4, 8, {4, 6}}, {4, 8}, {4, 6}}};
    throws_shape(migraphx::make_op(
        "allocate",
        {{"shape", migraphx::to_value(attr_shape)}, {"buf_type", migraphx::shape::half_type}}));
}
// allocate with neither "shape" nor "buf_type" cannot compute a shape: throws.
TEST_CASE(allocate_no_attr_error)
{
    migraphx::shape dims_input{migraphx::shape::int64_type, {4}};
    throws_shape(migraphx::make_op("allocate"), dims_input);
}
TEST_CASE(argmax_axis0)
{
migraphx::shape input{migraphx::shape::half_type, {2, 3, 4, 5}};
......@@ -156,13 +214,13 @@ TEST_CASE(broadcast)
{
std::vector<std::size_t> lens{1, 1};
migraphx::shape input{migraphx::shape::float_type, {2}};
throws_shape(migraphx::op::broadcast{1, lens}, input);
throws_shape(migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", lens}}), input);
}
{
std::vector<std::size_t> lens{2, 2};
migraphx::shape input{migraphx::shape::float_type, {1, 2}};
throws_shape(migraphx::op::broadcast{1, lens}, input);
throws_shape(migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", lens}}), input);
}
{
......@@ -862,6 +920,50 @@ TEST_CASE(flatten_dyn_axis4)
input);
}
// fill: output shape follows the second (data) argument; int64, static dims.
TEST_CASE(fill_static_int)
{
    migraphx::shape fill_value{migraphx::shape::int64_type, {1}, {0}};
    migraphx::shape target{migraphx::shape::int64_type, {3, 4, 4}};
    expect_shape(migraphx::shape{migraphx::shape::int64_type, {3, 4, 4}},
                 migraphx::make_op("fill"),
                 fill_value,
                 target);
}
// fill: same shape-propagation check as fill_static_int, but for float data.
TEST_CASE(fill_static_float)
{
    migraphx::shape fill_value{migraphx::shape::float_type, {1}, {0}};
    migraphx::shape target{migraphx::shape::float_type, {4, 8}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 8}},
                 migraphx::make_op("fill"),
                 fill_value,
                 target);
}
// fill with a dynamic int64 data shape: dynamic dimensions (including their
// optimal sizes) pass straight through to the output.
TEST_CASE(fill_dyn_int)
{
    migraphx::shape fill_value{migraphx::shape::int64_type, {1}, {0}};
    migraphx::shape target{migraphx::shape::int64_type,
                           {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}};
    expect_shape(migraphx::shape{migraphx::shape::int64_type,
                                 {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}},
                 migraphx::make_op("fill"),
                 fill_value,
                 target);
}
// fill with a dynamic float data shape: dynamic dimensions pass through.
TEST_CASE(fill_dyn_float)
{
    migraphx::shape fill_value{migraphx::shape::float_type, {1}, {0}};
    migraphx::shape target{migraphx::shape::float_type,
                           {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}};
    expect_shape(migraphx::shape{migraphx::shape::float_type,
                                 {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}},
                 migraphx::make_op("fill"),
                 fill_value,
                 target);
}
TEST_CASE(gather)
{
{
......@@ -1252,36 +1354,45 @@ TEST_CASE(inconsistent_attr_shape)
input);
}
template <class T>
void test_softmax_variations()
void test_softmax_variations(const std::string& name)
{
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{0}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
migraphx::make_op(name, {{"axis", 0}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{1}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
migraphx::make_op(name, {{"axis", 1}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{2}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
migraphx::make_op(name, {{"axis", 2}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{3}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
migraphx::make_op(name, {{"axis", 3}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
int axis = 4;
throws_shape(T{axis}, input);
throws_shape(migraphx::make_op(name, {{"axis", axis}}), input);
}
}
TEST_CASE(logsoftmax) { test_softmax_variations<migraphx::op::logsoftmax>(); }
TEST_CASE(logsoftmax) { test_softmax_variations("logsoftmax"); }
TEST_CASE(softmax) { test_softmax_variations("softmax"); }
TEST_CASE(lstm)
{
......@@ -1846,12 +1957,42 @@ TEST_CASE(multibroadcast_3in_dyn_dyn)
expect_shape(expected_shape, migraphx::make_op("multibroadcast"), c_shape, a_shape, b_shape);
}
TEST_CASE(multinomial)
TEST_CASE(multinomial_bool_type)
{
migraphx::shape s{migraphx::shape::float_type, {2, 5}};
migraphx::shape s1{migraphx::shape::float_type, {1, 2}};
migraphx::shape s2{migraphx::shape::float_type, {3, 4}};
int dtype = 0;
throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), s, s);
throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), s1, s2);
}
// multinomial: output keeps the first input's leading dim and the second
// input's trailing dim — {1,2} and {3,4} give {1,4}.
TEST_CASE(multinomial)
{
    migraphx::shape in0{migraphx::shape::float_type, {1, 2}};
    migraphx::shape in1{migraphx::shape::float_type, {3, 4}};
    migraphx::shape expected{migraphx::shape::float_type, {1, 4}};
    const int dtype = 2;
    expect_shape(expected, migraphx::make_op("multinomial", {{"dtype", dtype}}), in0, in1);
}
// multinomial: an empty-dims second input is rejected.
TEST_CASE(multinomial_0size_input)
{
    migraphx::shape in0{migraphx::shape::float_type, {1, 2}};
    migraphx::shape empty_in{migraphx::shape::float_type, {}};
    const int dtype = 2;
    throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), in0, empty_in);
}
// multinomial with dynamic inputs: output combines the first input's leading
// dynamic dim with the second input's trailing dynamic dim.
TEST_CASE(multinomial_dyn)
{
    migraphx::shape in0{migraphx::shape::int32_type, {{2, 3}, {5, 6}}};
    migraphx::shape in1{migraphx::shape::int32_type, {{7, 8}, {9, 10}}};
    migraphx::shape expected{migraphx::shape::int32_type, {{2, 3}, {9, 10}}};
    expect_shape(
        expected, migraphx::make_op("multinomial", {{"dtype", migraphx::shape::int32_type}}), in0, in1);
}
TEST_CASE(nms_shape)
......@@ -2106,6 +2247,13 @@ TEST_CASE(pooling_shape3)
input);
}
// A rank-2 input is too small for pooling and must be rejected.
TEST_CASE(pooling_shape4)
{
    migraphx::shape rank2_input{migraphx::shape::float_type, {4, 1}};
    throws_shape(migraphx::make_op("pooling", {{"mode", migraphx::op::pooling_mode::max}}),
                 rank2_input);
}
TEST_CASE(pooling_dyn_shape0)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {3, 3, {3}}, {3, 3, {3}}, {3, 3}}};
......@@ -2216,6 +2364,20 @@ TEST_CASE(prefix_scan_sum_dyn_2d)
}
}
// random_uniform: the output shape is exactly the second input's shape
// (here a dynamic float shape); the first input is a uint64 scalar.
TEST_CASE(random_uniform)
{
    std::vector<migraphx::shape::dynamic_dimension> dyn_dims{{5, 8}, {3, 7}};
    migraphx::shape in0{migraphx::shape::uint64_type, {1}};
    migraphx::shape out{migraphx::shape::float_type, dyn_dims};
    expect_shape(out, migraphx::make_op("random_uniform"), in0, out);
}
// random_seed: no inputs; produces a single uint64 value (stride-0 scalar).
TEST_CASE(random_seed)
{
    migraphx::shape scalar_u64{migraphx::shape::uint64_type, {1}, {0}};
    expect_shape(scalar_u64, migraphx::make_op("random_seed"));
}
TEST_CASE(quant_convolution_shape)
{
migraphx::shape output{migraphx::shape::int32_type, {4, 4, 1, 1}};
......@@ -2328,47 +2490,54 @@ TEST_CASE(dqlinear_mismatch_type)
throws_shape(migraphx::make_op("dequantizelinear"), input, scales, zeros);
}
template <class T>
void test_reduce_ops()
void test_reduce_ops(const std::string& name)
{
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}}, T{}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}},
migraphx::make_op(name),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(
migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}}, T{{0, 1, 2, 3}}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}},
migraphx::make_op(name, {{"axes", {0, 1, 2, 3}}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 1, 1}}, T{{2, 3}}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 1, 1}},
migraphx::make_op(name, {{"axes", {2, 3}}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 3, 4, 5}}, T{{0}}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 3, 4, 5}},
migraphx::make_op(name, {{"axes", {0}}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 1}}, T{{-1}}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 1}},
migraphx::make_op(name, {{"axes", {-1}}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
throws_shape(T{{4}}, input);
throws_shape(migraphx::make_op(name, {{"axes", {4}}}), input);
}
}
// dynamic shape
template <class T>
void test_dyn_reduce_ops()
void test_dyn_reduce_ops(const std::string& name)
{
{
migraphx::shape input{migraphx::shape::float_type, {{2, 3, {3}}, {2, 4, {4}}}};
expect_shape(
migraphx::shape{migraphx::shape::float_type,
std::vector<migraphx::shape::dynamic_dimension>({{2, 3, {3}}, {1, 1}})},
T{{-1}},
migraphx::make_op(name, {{"axes", {-1}}}),
input);
}
{
......@@ -2376,7 +2545,7 @@ void test_dyn_reduce_ops()
expect_shape(
migraphx::shape{migraphx::shape::float_type,
std::vector<migraphx::shape::dynamic_dimension>({{1, 1}, {2, 4, {4}}})},
T{{0}},
migraphx::make_op(name, {{"axes", {0}}}),
input);
}
{
......@@ -2385,24 +2554,24 @@ void test_dyn_reduce_ops()
expect_shape(
migraphx::shape{migraphx::shape::float_type,
std::vector<migraphx::shape::dynamic_dimension>({{1, 1}, {1, 1}})},
T{{}},
migraphx::make_op(name),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {{2, 3, {3}}, {2, 4, {4}}}};
throws_shape(T{{4}}, input);
throws_shape(migraphx::make_op(name, {{"axes", {4}}}), input);
}
}
// Instantiate the shared reduce shape tests for each reduce operator.
// NOTE(review): merge leftovers kept both the old template instantiations
// and the new string-based calls (duplicate TEST_CASE definitions); only the
// string-based calls are retained.
TEST_CASE(reduce_max) { test_reduce_ops("reduce_max"); }
TEST_CASE(reduce_mean) { test_reduce_ops("reduce_mean"); }
TEST_CASE(reduce_prod) { test_reduce_ops("reduce_prod"); }
TEST_CASE(reduce_sum) { test_reduce_ops("reduce_sum"); }

TEST_CASE(reduce_max_dyn) { test_dyn_reduce_ops("reduce_max"); }
TEST_CASE(reduce_mean_dyn) { test_dyn_reduce_ops("reduce_mean"); }
TEST_CASE(reduce_prod_dyn) { test_dyn_reduce_ops("reduce_prod"); }
TEST_CASE(reduce_sum_dyn) { test_dyn_reduce_ops("reduce_sum"); }
TEST_CASE(reshape_shape)
{
......@@ -2415,13 +2584,21 @@ TEST_CASE(reshape_shape)
migraphx::shape output{migraphx::shape::float_type, lens};
expect_shape(output, migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}
}
TEST_CASE(reshape_shape_invalid)
{
    // Element-count mismatches and more than one -1 wildcard are rejected.
    migraphx::shape in_shape{migraphx::shape::float_type, {24, 1, 1, 1}};
    const std::vector<std::vector<int64_t>> bad_dims = {
        {8, 3, 2, 2}, {1, 3, -1, -1}, {3, 0}, {3, 2}};
    for(const auto& dims : bad_dims)
    {
        throws_shape(migraphx::make_op("reshape", {{"dims", dims}}), in_shape);
    }
}
TEST_CASE(reshape_shape_minus1_reshapes)
{
migraphx::shape input{migraphx::shape::float_type, {24, 1, 1, 1}};
std::vector<std::pair<std::vector<int64_t>, migraphx::shape>> minus1_tests{
{{2, -1, 3}, {migraphx::shape::float_type, {2, 4, 3}}},
{{0, -1, 0}, {migraphx::shape::float_type, {24, 1, 1}}},
......@@ -2545,14 +2722,14 @@ TEST_CASE(reshape_broadcast_squeeze)
expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
// Reshaping a fully-broadcast (stride-0) input by splitting its last axis
// yields another broadcast shape with adjusted strides.
// NOTE(review): merge leftovers kept both the old error-expecting test
// (reshape_broadcast_squeeze_error) and the new expect_shape test; resolved
// to the new memlayout_change version.
TEST_CASE(reshape_broadcast_squeeze_memlayout_change)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
    migraphx::shape output{migraphx::shape::float_type, {2, 16, 256, 80}, {0, 0, 0, 16}};
    expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_dyn_shape)
TEST_CASE(reshape_dyn_1in)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
for(auto&& new_shape : std::vector<std::vector<int64_t>>{
......@@ -2576,6 +2753,27 @@ TEST_CASE(reshape_dyn_shape)
}
}
TEST_CASE(reshape_dyn_2in_0)
{
    // Two-input reshape: the second input supplies the output shape directly.
    migraphx::shape data{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    migraphx::shape target{migraphx::shape::float_type, {{1, 4}, {8, 8}, {3, 3}, {1, 1}}};
    expect_shape(target, migraphx::make_op("reshape"), data, target);
}
TEST_CASE(reshape_dyn_2in_1)
{
    // Two-input reshape where the dynamic dimensions are fully rearranged.
    migraphx::shape data{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    migraphx::shape target{migraphx::shape::float_type, {{12, 12}, {2, 2}, {1, 1}, {1, 4}}};
    expect_shape(target, migraphx::make_op("reshape"), data, target);
}
TEST_CASE(reshape_dyn_2in_2)
{
    // Static data with a dynamic target shape from the second input.
    migraphx::shape data{migraphx::shape::float_type, {2, 24, 1, 1}};
    migraphx::shape target{migraphx::shape::float_type, {{1, 2}, {6, 12}, {1, 1}, {4, 4}}};
    expect_shape(target, migraphx::make_op("reshape"), data, target);
}
TEST_CASE(reshape_multiple_non_fixed_error)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {10, 20}, {1, 1}}};
......@@ -2597,6 +2795,199 @@ TEST_CASE(reshape_non_fixed_not_matching_error)
throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}
// reshape_lazy on a standard static shape: valid dims succeed, invalid dims
// throw, and 0/-1 placeholders are resolved (0 copies the input dimension at
// that position, -1 is inferred from the remaining element count).
TEST_CASE(reshape_lazy_shape)
{
    migraphx::shape input{migraphx::shape::float_type, {24, 1, 1, 1}};
    for(auto&& new_shape :
        std::vector<std::vector<int64_t>>{{8, 3, 1, 1}, {1, 3, 4, 2}, {1, 3, 4, 2}})
    {
        std::vector<std::size_t> lens(new_shape.size());
        std::copy(new_shape.begin(), new_shape.end(), lens.begin());
        migraphx::shape output{migraphx::shape::float_type, lens};
        expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
    }
    // must throw: wrong element count or more than one -1 wildcard
    for(auto&& new_shape :
        std::vector<std::vector<int64_t>>{{8, 3, 2, 2}, {1, 3, -1, -1}, {3, 0}, {3, 2}})
    {
        throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
    }
    // {requested dims with 0/-1 placeholders, expected resolved shape}
    std::vector<std::pair<std::vector<int64_t>, migraphx::shape>> minus1_tests{
        {{2, -1, 3}, {migraphx::shape::float_type, {2, 4, 3}}},
        {{0, -1, 0}, {migraphx::shape::float_type, {24, 1, 1}}},
        {{2, -1, 0}, {migraphx::shape::float_type, {2, 12, 1}}},
        {{0, 0, -1}, {migraphx::shape::float_type, {24, 1, 1}}},
        {{2, 0, -1}, {migraphx::shape::float_type, {2, 1, 12}}},
        {{-1, 2, 3}, {migraphx::shape::float_type, {4, 2, 3}}},
        {{-1, 0, 3}, {migraphx::shape::float_type, {8, 1, 3}}},
        {{-1, 0, 0}, {migraphx::shape::float_type, {24, 1, 1}}},
        {{-1, 3, 0}, {migraphx::shape::float_type, {8, 3, 1}}}};
    for(auto& it : minus1_tests)
    {
        expect_shape(it.second, migraphx::make_op("reshape_lazy", {{"dims", it.first}}), input);
    }
}
// This uses the permutation to compute the reshape_lazy since it's simpler than
// trying to calculate strides. As we collapse or expand dimensions, we
// remove the collapsed dimensions or duplicate the expanded dimensions in
// the permutation. Then we renumber the permutation. So for dimensions of 4,
// 24, 1, 1, 1 with a permutation of 1, 0, 2, 3, 4 that is lazily reshaped to
// 4, 1, 3, 4, 2, we first remove the collapsed dimensions or duplicate the
// expanded dimensions, which gives 1, 0, 0, 0, 0. Then after renumbering we
// get a final permutation of 4, 0, 1, 2, 3.
// reshape_lazy on a non-standard (permuted) layout: each target `dims` has a
// matching expected output permutation, derived as described in the comment
// above.
TEST_CASE(reshape_lazy_nonstandard)
{
    auto input = migraphx::shape::from_permutation(migraphx::shape::float_type,
                                                   {4, 24, 1, 1, 1},
                                                   migraphx::invert_permutation({1, 0, 2, 3, 4}));
    // pairs of {target dims, expected output permutation}
    std::vector<std::pair<std::vector<std::size_t>, std::vector<int64_t>>> tests{
        {{4, 24}, {1, 0}},
        {{4, 24, 1, 1, 1, 1}, {1, 0, 2, 3, 4, 5}},
        {{4, 8, 3, 1, 1}, {2, 0, 1, 3, 4}},
        {{4, 1, 3, 4, 2}, {4, 0, 1, 2, 3}},
        {{4, 1, 4, 3, 2}, {4, 0, 1, 2, 3}},
        {{4, 2, 4, 3}, {3, 0, 1, 2}},
        {{4, 2, 12, 1}, {2, 0, 1, 3}},
        {{4, 2, 1, 12}, {3, 0, 1, 2}},
        {{4, 4, 2, 3}, {3, 0, 1, 2}},
        {{4, 8, 1, 3}, {3, 0, 1, 2}},
        {{4, 8, 3, 1}, {2, 0, 1, 3}}};
    for(const auto& [dims, perm] : tests)
    {
        migraphx::shape output = migraphx::shape::from_permutation(
            migraphx::shape::float_type, dims, migraphx::invert_permutation(perm));
        expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", dims}}), input);
    }
}
TEST_CASE(reshape_lazy_nonstandard_squeeze)
{
    // Collapsing the two middle axes of a permuted input keeps a matching
    // non-standard layout in the squeezed result.
    auto in_shape = migraphx::shape::from_permutation(
        migraphx::shape::float_type, {2, 16, 16, 1280}, migraphx::invert_permutation({0, 2, 3, 1}));
    std::vector<std::size_t> out_lens = {2, 256, 1280};
    auto expected = migraphx::shape::from_permutation(
        migraphx::shape::float_type, out_lens, migraphx::invert_permutation({0, 2, 1}));
    expect_shape(expected, migraphx::make_op("reshape_lazy", {{"dims", out_lens}}), in_shape);
}
TEST_CASE(reshape_lazy_nonstandard_error)
{
    // None of these target dims can be produced lazily from the permuted input.
    auto in_shape = migraphx::shape::from_permutation(migraphx::shape::float_type,
                                                      {4, 24, 1, 1, 1},
                                                      migraphx::invert_permutation({1, 0, 2, 3, 4}));
    const std::vector<std::vector<int64_t>> bad_dims = {{4, 8, 3, 2, 2},
                                                        {1},
                                                        {4, 8, 4},
                                                        {4, 24, 1, 1, 1, 1, 2},
                                                        {8, 4, 4},
                                                        {4, 1, 3, -1, -1},
                                                        {4, 3, 0},
                                                        {4, 3, 2},
                                                        {3, 0},
                                                        {3, 2}};
    for(const auto& dims : bad_dims)
    {
        throws_shape(migraphx::make_op("reshape_lazy", {{"dims", dims}}), in_shape);
    }
}
TEST_CASE(reshape_lazy_nonpacked_unsqueeze1)
{
    // Split the second axis of a non-packed 2D shape without copying.
    const migraphx::shape in_shape{migraphx::shape::float_type, {4, 16}, {32, 2}};
    const migraphx::shape expected{migraphx::shape::float_type, {4, 2, 8}, {32, 16, 2}};
    expect_shape(expected, migraphx::make_op("reshape_lazy", {{"dims", expected.lens()}}), in_shape);
}
TEST_CASE(reshape_lazy_nonpacked_unsqueeze2)
{
    // Split the first axis of a non-packed 2D shape without copying.
    const migraphx::shape in_shape{migraphx::shape::float_type, {4, 16}, {32, 2}};
    const migraphx::shape expected{migraphx::shape::float_type, {2, 2, 16}, {64, 32, 2}};
    expect_shape(expected, migraphx::make_op("reshape_lazy", {{"dims", expected.lens()}}), in_shape);
}
TEST_CASE(reshape_lazy_nonpacked_squeeze)
{
    // Flatten a non-packed 2D shape into one strided dimension.
    const migraphx::shape in_shape{migraphx::shape::float_type, {4, 16}, {32, 2}};
    const migraphx::shape expected{migraphx::shape::float_type, {64}, {2}};
    expect_shape(expected, migraphx::make_op("reshape_lazy", {{"dims", expected.lens()}}), in_shape);
}
TEST_CASE(reshape_lazy_broadcast_unsqueeze1)
{
    // Splitting a stride-0 (broadcast) axis keeps the broadcast layout.
    const migraphx::shape in_shape{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
    const migraphx::shape expected{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
    expect_shape(expected, migraphx::make_op("reshape_lazy", {{"dims", expected.lens()}}), in_shape);
}
TEST_CASE(reshape_lazy_broadcast_unsqueeze2)
{
    // Splitting the packed trailing axis of a broadcast shape stays lazy.
    const migraphx::shape in_shape{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
    const migraphx::shape expected{migraphx::shape::float_type, {2, 256, 16, 80}, {0, 0, 80, 1}};
    expect_shape(expected, migraphx::make_op("reshape_lazy", {{"dims", expected.lens()}}), in_shape);
}
TEST_CASE(reshape_lazy_broadcast_squeeze)
{
    // Merging adjacent stride-0 axes of a broadcast shape stays lazy.
    const migraphx::shape in_shape{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
    const migraphx::shape expected{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
    expect_shape(expected, migraphx::make_op("reshape_lazy", {{"dims", expected.lens()}}), in_shape);
}
TEST_CASE(reshape_lazy_broadcast_squeeze_error)
{
    // Merging a broadcast axis with the packed last axis has no lazy layout.
    const migraphx::shape in_shape{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
    const std::vector<int64_t> dims = {2, 16, 20480};
    throws_shape(migraphx::make_op("reshape_lazy", {{"dims", dims}}), in_shape);
}
// reshape_lazy with a dynamic input: 0 and -1 at position i carry the input's
// dynamic dimension i through, while positive values become fixed {d, d}
// dimensions.
TEST_CASE(reshape_lazy_dyn_shape)
{
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    for(auto&& new_shape : std::vector<std::vector<int64_t>>{
            {-1, 1, 1, 24}, {0, 8, 3, 1}, {-1, 3, 4, 2}, {0, 2, 4, 3}})
    {
        // build the expected dynamic output dims from the reshape spec
        std::vector<migraphx::shape::dynamic_dimension> out_dyn_dims{};
        for(std::size_t i = 0; i < new_shape.size(); ++i)
        {
            if(new_shape[i] == 0 or new_shape[i] == -1)
            {
                // placeholder: reuse the input's dynamic dimension at i
                out_dyn_dims.push_back(input.dyn_dims().at(i));
            }
            else
            {
                // literal value: fixed dimension
                std::size_t d = new_shape[i];
                out_dyn_dims.push_back({d, d});
            }
        }
        migraphx::shape output{migraphx::shape::float_type, out_dyn_dims};
        expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
    }
}
TEST_CASE(reshape_lazy_multiple_non_fixed_error)
{
    // More than one non-fixed dynamic dimension cannot be carried through.
    migraphx::shape in_shape{migraphx::shape::float_type, {{1, 4}, {24, 24}, {10, 20}, {1, 1}}};
    const std::vector<int64_t> dims = {0, 1, 0, 24};
    throws_shape(migraphx::make_op("reshape_lazy", {{"dims", dims}}), in_shape);
}
TEST_CASE(reshape_lazy_fixed_ele_not_matching_error)
{
    // Fixed element counts disagree between input and requested dims.
    migraphx::shape in_shape{migraphx::shape::float_type, {{1, 4}, {24, 24}, {10, 10}, {1, 1}}};
    const std::vector<int64_t> dims = {0, 1, 5, 24};
    throws_shape(migraphx::make_op("reshape_lazy", {{"dims", dims}}), in_shape);
}
TEST_CASE(reshape_lazy_non_fixed_not_matching_error)
{
    // The non-fixed input dimension has no matching placeholder in dims.
    migraphx::shape in_shape{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    const std::vector<int64_t> dims = {2, 1, 1, 24};
    throws_shape(migraphx::make_op("reshape_lazy", {{"dims", dims}}), in_shape);
}
TEST_CASE(return_shape_tuple)
{
using migraphx::shape;
......@@ -2822,7 +3213,7 @@ TEST_CASE(select_module_dyn)
input);
}
TEST_CASE(slice_shape)
TEST_CASE(slice_static_shape)
{
migraphx::shape input{migraphx::shape::int32_type, {2, 2, 3}};
expect_shape(migraphx::shape{migraphx::shape::int32_type, {2, 2, 2}, {6, 3, 1}},
......@@ -2840,6 +3231,292 @@ TEST_CASE(slice_shape)
input);
}
// ---- "slice" with variable (runtime) inputs and a static data shape ----
// Each shapeN case fixes a different subset of {starts, ends, axes} as
// operator attributes and supplies the remainder as extra inputs; sliced
// axes become dynamic dimensions with minimum 0 because the bounds are not
// known at compile time.  Each mismatch_errorN case supplies attribute/input
// lengths that disagree, which must throw.
TEST_CASE(slice_var_inputs_static_shape0)
{
    // attr ends and axes set; inputs are (data, input_starts)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"ends", {2, 3}}, {"axes", {1, 2}}}),
                 input,
                 starts);
}

TEST_CASE(slice_var_inputs_static_mismatch_error0)
{
    // ends/axes have 3 entries but input_starts has 2
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    throws_shape(
        migraphx::make_op("slice", {{"ends", {2, 3, 4}}, {"axes", {0, 1, 2}}}), input, starts);
}

TEST_CASE(slice_var_inputs_static_shape1)
{
    // attr starts and axes set; inputs are (data, input_ends)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"starts", {0, 1}}, {"axes", {1, 2}}}),
                 input,
                 ends);
}

TEST_CASE(slice_var_inputs_static_mismatch_error1)
{
    // starts/axes have 3 entries but input_ends has 2
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    throws_shape(
        migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"axes", {0, 1, 2}}}), input, ends);
}

TEST_CASE(slice_var_inputs_static_shape2)
{
    // attr starts and ends set; inputs are (data, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    // the axes are unknown, so every dimension may end up sliced
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"starts", {0, 1}}, {"ends", {1, 2}}}),
                 input,
                 axes);
}

TEST_CASE(slice_var_inputs_static_mismatch_error2)
{
    // starts/ends have 3 entries but input_axes has 2
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(
        migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"ends", {3, 4, 4}}}), input, axes);
}

TEST_CASE(slice_var_inputs_static_shape3)
{
    // attr axes set; inputs are (data, input_starts, input_ends)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"axes", {1, 2}}}),
                 input,
                 starts,
                 ends);
}

TEST_CASE(slice_var_inputs_static_mismatch_error3)
{
    // axes has 3 entries but input_starts/input_ends have 2
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"axes", {0, 1, 2}}}), input, starts, ends);
}

TEST_CASE(slice_var_inputs_static_shape4)
{
    // attr ends set; inputs are (data, input_starts, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"ends", {3, 4}}}),
                 input,
                 starts,
                 axes);
}

TEST_CASE(slice_var_inputs_static_mismatch_error4)
{
    // ends has 3 entries but input_starts/input_axes have 2
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"ends", {3, 3, 3}}}), input, starts, axes);
}

TEST_CASE(slice_var_inputs_static_shape5)
{
    // attr starts set; inputs are (data, input_ends, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"starts", {0, 2}}}),
                 input,
                 ends,
                 axes);
}

TEST_CASE(slice_var_inputs_static_mismatch_error5)
{
    // starts has 3 entries but input_ends/input_axes have 2
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"starts", {0, 1, 2}}}), input, ends, axes);
}

TEST_CASE(slice_var_inputs_static_shape6)
{
    // no attrs set; inputs are (data, input_starts, input_ends, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice"),
                 input,
                 starts,
                 ends,
                 axes);
}

TEST_CASE(slice_var_inputs_static_mismatch_error6)
{
    // input_axes length (3) differs from input_starts/input_ends (2)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {3}};
    throws_shape(migraphx::make_op("slice"), input, starts, ends, axes);
}
// ---- "slice" with variable (runtime) inputs and a dynamic data shape ----
// Mirrors the static family above: the sliced axes' minimum drops to 0 and
// the maximum stays at the input's maximum; mismatched attribute/input
// lengths must throw.
TEST_CASE(slice_var_inputs_dyn_shape0)
{
    // attr ends and axes set; inputs are (data, input_starts)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"ends", {2, 3}}, {"axes", {1, 2}}}),
                 input,
                 starts);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error0)
{
    // ends/axes have 3 entries but input_starts has 2
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    throws_shape(
        migraphx::make_op("slice", {{"ends", {2, 3, 4}}, {"axes", {0, 1, 2}}}), input, starts);
}

TEST_CASE(slice_var_inputs_dyn_shape1)
{
    // attr starts and axes set; inputs are (data, input_ends)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"starts", {0, 1}}, {"axes", {1, 2}}}),
                 input,
                 ends);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error1)
{
    // starts/axes have 3 entries but input_ends has 2
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    throws_shape(
        migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"axes", {0, 1, 2}}}), input, ends);
}

TEST_CASE(slice_var_inputs_dyn_shape2)
{
    // attr starts and ends set; inputs are (data, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    // the axes are unknown, so every dimension may end up sliced
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"starts", {0, 1}}, {"ends", {8, 8}}}),
                 input,
                 axes);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error2)
{
    // starts/ends have 3 entries but input_axes has 2
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(
        migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"ends", {3, 4, 4}}}), input, axes);
}

TEST_CASE(slice_var_inputs_dyn_shape3)
{
    // attr axes set; inputs are (data, input_starts, input_ends)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"axes", {1, 2}}}),
                 input,
                 starts,
                 ends);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error3)
{
    // axes has 3 entries but input_starts/input_ends have 2
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"axes", {0, 1, 2}}}), input, starts, ends);
}

TEST_CASE(slice_var_inputs_dyn_shape4)
{
    // attr ends set; inputs are (data, input_starts, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"ends", {3, 4}}}),
                 input,
                 starts,
                 axes);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error4)
{
    // ends has 3 entries but input_starts/input_axes have 2
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"ends", {3, 3, 3}}}), input, starts, axes);
}

TEST_CASE(slice_var_inputs_dyn_shape5)
{
    // attr starts set; inputs are (data, input_ends, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"starts", {0, 2}}}),
                 input,
                 ends,
                 axes);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error5)
{
    // starts has 3 entries but input_ends/input_axes have 2
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"starts", {0, 1, 2}}}), input, ends, axes);
}

TEST_CASE(slice_var_inputs_dyn_shape6)
{
    // no attrs set; inputs are (data, input_starts, input_ends, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {2, 4, {2, 4}}, {2, 4, {2, 4}}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice"),
                 input,
                 starts,
                 ends,
                 axes);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error6)
{
    // input_axes length (3) differs from input_starts/input_ends (2)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {3}};
    throws_shape(migraphx::make_op("slice"), input, starts, ends, axes);
}
TEST_CASE(slice_dyn_shape0)
{
migraphx::shape input{migraphx::shape::int32_type, {{2, 3}, {7, 7}, {2, 3}}};
......@@ -2870,7 +3547,7 @@ TEST_CASE(slice_dyn_shape2)
TEST_CASE(slice_dyn_shape3)
{
    // TODO: When non-fixed dimension slicing is allowed, Slice to a size smaller than min.
// Until then, this action is an error.
migraphx::shape input{migraphx::shape::int32_type, {{2, 3}, {7, 8}, {2, 3}}};
throws_shape(migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {1}}}),
......@@ -2901,8 +3578,6 @@ TEST_CASE(slice_dyn_shape5)
input);
}
TEST_CASE(softmax) { test_softmax_variations<migraphx::op::softmax>(); }
TEST_CASE(softmax_dyn0)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {3, 3}, {4, 4}, {5, 5}}};
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/literal.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/module.hpp>
#include <migraphx/optimize_module.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/serialize.hpp>
#include <test.hpp>
// Apply the optimize_module pass pipeline to a module in place.
void run_pass(migraphx::module& m)
{
    migraphx::run_passes(m, {migraphx::optimize_module{}});
}
TEST_CASE(broadcast_transpose_inner_broadcast)
{
    // first optimizes broadcast+transpose to just a broadcast,
    // then finds inner broadcast to become mul+broadcast
    migraphx::module before;
    {
        auto x  = before.add_parameter("x", {migraphx::shape::float_type, {1}, {0}});
        auto y  = before.add_parameter("y", {migraphx::shape::float_type, {1}, {0}});
        auto bx =
            before.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 2, 3}}}), x);
        auto by =
            before.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 3, 2}}}), y);
        auto tx =
            before.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), bx);
        auto prod = before.add_instruction(migraphx::make_op("mul"), by, tx);
        before.add_return({prod});
    }
    run_pass(before);

    // expected: multiply the scalars first, broadcast the product once
    migraphx::module after;
    {
        auto x     = after.add_parameter("x", {migraphx::shape::float_type, {1}, {0}});
        auto y     = after.add_parameter("y", {migraphx::shape::float_type, {1}, {0}});
        auto prod  = after.add_instruction(migraphx::make_op("mul"), y, x);
        auto bcast =
            after.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 3, 2}}}), prod);
        after.add_return({bcast});
    }
    EXPECT(before == after);
}
TEST_CASE(broadcast_transpose_inner_broadcast_generic)
{
    // first optimizes broadcast+transpose to unsqueeze+transpose+broadcast,
    // then finds inner broadcast to become mul+broadcast
    migraphx::module m1;
    {
        auto l1 = m1.add_parameter("x", {migraphx::shape::float_type, {5, 10}});
        auto l2 = m1.add_parameter("y", {migraphx::shape::float_type, {5}});
        auto mb1 =
            m1.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 5, 10}}}), l1);
        auto mb2 =
            m1.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 10, 5}}}), l2);
        auto t1 =
            m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), mb2);
        auto mul = m1.add_instruction(migraphx::make_op("mul"), mb1, t1);
        m1.add_return({mul});
    }
    run_pass(m1);
    // expected graph after optimize_module
    migraphx::module m2;
    {
        auto l1 = m2.add_parameter("x", {migraphx::shape::float_type, {5, 10}});
        auto l2 = m2.add_parameter("y", {migraphx::shape::float_type, {5}});
        // the rank-1 parameter is unsqueezed and transposed before the multiply
        auto unsqueeze = m2.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0, 1}}}), l2);
        auto transpose = m2.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), unsqueeze);
        auto mb1 =
            m2.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 5, 10}}}), l1);
        auto mb2 = m2.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {1, 5, 10}}}), transpose);
        auto mul = m2.add_instruction(migraphx::make_op("mul"), mb1, mb2);
        // the broadcast to the final lens happens once, after the multiply
        auto mb3 = m2.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {3, 5, 10}}}), mul);
        m2.add_return({mb3});
    }
    EXPECT(m1 == m2);
}
// Test harness entry point: runs every registered TEST_CASE.
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -22,7 +22,6 @@
* THE SOFTWARE.
*/
#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/pad_calc.hpp>
#include "test.hpp"
......
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
......@@ -23,8 +23,15 @@
#####################################################################################
include(PythonModules)
# Roots for the per-interpreter virtual environments used by the tests.
set(VENV ${CMAKE_BINARY_DIR}/test/py/venv)
set(VENV_ONNX ${CMAKE_BINARY_DIR}/test/py/venv-onnx)
# pip requirements installed into each environment.
set(REQUIREMENTS ${CMAKE_CURRENT_SOURCE_DIR}/requirements.txt)
set(REQUIREMENTS_ONNX ${CMAKE_CURRENT_SOURCE_DIR}/requirements-onnx.txt)
# Interpreter version for which the onnx fixture/tests are skipped entirely.
set(PYTHON_VERSION_TO_DISABLE_ONNX 3.6)
# When ON, tests run against the system interpreters instead of the venvs.
option(MIGRAPHX_DISABLE_VIRTUAL_ENV "Disable python virtual environments" OFF)
# NOTE(review): this region appears to contain unified-diff residue from the
# manual merge — the stray 'function(add_py_test NAME SCRIPT)' header below,
# the '......@@' hunk-marker line, and the add_test/add_custom_target calls
# midway through look like leftovers of the OLD add_py_test definition shown
# interleaved with the NEW add_py_venv_fixture. Verify against the actual
# merged file before building.
function(add_py_test NAME SCRIPT)
# Creates the CTest fixtures that build and populate a python virtual
# environment per interpreter version; tests declare them via
# FIXTURES_REQUIRED so the venv exists before they run.
#   FIXTURE_NAME      - fixture family name (e.g. "common" or "onnx")
#   VIRTUAL_ENV_DIR   - root directory holding one venv per python version
#   REQUIREMENTS_FILE - pip requirements installed into each venv (optional)
function(add_py_venv_fixture FIXTURE_NAME VIRTUAL_ENV_DIR REQUIREMENTS_FILE)
foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
# Environment for running tests: point at the freshly built bindings and
# enable allocator debugging.
set (ENV_COMMAND ${CMAKE_COMMAND} -E env
"PYTHONPATH=$<TARGET_FILE_DIR:migraphx_pybind_${PYTHON_VERSION}>"
......@@ -32,28 +39,73 @@ function(add_py_test NAME SCRIPT)
"MALLOC_CHECK_=3"
)
set(PYTHON_EXECUTABLE ${PYTHON_${PYTHON_VERSION}_EXECUTABLE})
# NOTE(review): the next two registrations duplicate add_py_test below —
# presumed diff residue of the pre-merge function body.
add_test(
NAME test_py_${PYTHON_VERSION}_${NAME}
COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN})
add_custom_target(test_py_${PYTHON_VERSION}_${NAME}
COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN}
COMMENT "${PYTHON_EXECUTABLE} ${SCRIPT}")
# Only register each fixture once, and skip the onnx fixture for the
# interpreter version where onnx is disabled.
if(NOT TEST py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env)
if (NOT (${FIXTURE_NAME} STREQUAL "onnx" AND ${PYTHON_VERSION} STREQUAL ${PYTHON_VERSION_TO_DISABLE_ONNX}))
# Step 1: create (or recreate, --clear) the virtual environment.
add_test(NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env COMMAND ${PYTHON_EXECUTABLE} -m venv ${VIRTUAL_ENV_DIR}/${PYTHON_VERSION} --clear)
set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env PROPERTIES FIXTURES_SETUP ${FIXTURE_NAME}_${PYTHON_VERSION}_INIT_VENV)
# From here on use the venv's interpreter, not the system one.
set(PYTHON_EXECUTABLE ${VIRTUAL_ENV_DIR}/${PYTHON_VERSION}/bin/python)
# Step 2: populate the venv from the requirements file if one exists.
if(EXISTS ${REQUIREMENTS_FILE})
add_test(
NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
COMMAND ${PYTHON_EXECUTABLE} -m pip install -r ${REQUIREMENTS_FILE})
else()
# If there is no requirements file, then there are no packages to install in the virtual env.
# Just create a placeholder test for setting up the required fixture for running the tests.
add_test(
NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
COMMAND ${PYTHON_EXECUTABLE} -m pip install --help)
endif()
# Chain the fixtures: setup_env needs INIT_VENV and provides VENV,
# which is what the actual tests require.
set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_INIT_VENV)
set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env PROPERTIES FIXTURES_SETUP ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
endif()
endif()
endforeach()
endfunction()
# Registers a python test script as a CTest test (plus a matching manual
# build target) for every configured python interpreter version.
#   NAME         - logical name; becomes test_py_<version>_<NAME>
#   SCRIPT       - script path relative to this directory
#   FIXTURE_NAME - venv fixture family the test depends on ("common"/"onnx")
#   VENV_DIR     - root of the per-version virtual environments
# Remaining arguments (${ARGN}) are appended to the script invocation.
function(add_py_test NAME SCRIPT FIXTURE_NAME VENV_DIR)
foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
# Debug-friendly environment: resolve the freshly built pybind module and
# enable allocator checking.
set (ENV_COMMAND ${CMAKE_COMMAND} -E env
"PYTHONPATH=$<TARGET_FILE_DIR:migraphx_pybind_${PYTHON_VERSION}>"
"PYTHONMALLOC=debug"
"MALLOC_CHECK_=3"
)
# Use the venv interpreter unless virtual environments are disabled.
if(MIGRAPHX_DISABLE_VIRTUAL_ENV)
set(PYTHON_EXECUTABLE ${PYTHON_${PYTHON_VERSION}_EXECUTABLE})
else()
set(PYTHON_EXECUTABLE ${VENV_DIR}/${PYTHON_VERSION}/bin/python)
endif()
# onnx-fixture tests are skipped for the disabled interpreter version.
if(NOT (${FIXTURE_NAME} STREQUAL "onnx" AND ${PYTHON_VERSION} STREQUAL ${PYTHON_VERSION_TO_DISABLE_ONNX}))
add_test(
NAME test_py_${PYTHON_VERSION}_${NAME}
COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN})
# Manual target so a single test can be invoked via the build tool.
add_custom_target(test_py_${PYTHON_VERSION}_${NAME}
COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN}
COMMENT "${PYTHON_EXECUTABLE} ${SCRIPT}")
if(NOT MIGRAPHX_DISABLE_VIRTUAL_ENV)
# Ensure the venv fixture has been created/populated before this test.
set_tests_properties(test_py_${PYTHON_VERSION}_${NAME} PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
endif()
endif()
endforeach()
endfunction()
# Build the python bindings before running either suite.
add_dependencies(tests migraphx_py)
add_dependencies(check migraphx_py)
# NOTE(review): the next six add_py_test calls use the OLD two-argument
# signature (no fixture/venv args) and duplicate the calls further below —
# presumed unified-diff residue of the pre-merge registrations; verify the
# merged file keeps only the four-argument form.
add_py_test(ref test_cpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(save_load test_save_load.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(op test_op.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(shape test_shape.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(module_construct test_module_construct.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(literal test_literal.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
# Register the venv fixtures the tests depend on.
if(NOT MIGRAPHX_DISABLE_VIRTUAL_ENV)
add_py_venv_fixture(common ${VENV} ${REQUIREMENTS})
add_py_venv_fixture(onnx ${VENV_ONNX} ${REQUIREMENTS_ONNX})
endif()
# CPU-side python tests, all using the "common" venv fixture.
add_py_test(ref test_cpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(save_load test_save_load.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(op test_op.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(shape test_shape.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(module_construct test_module_construct.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(literal test_literal.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
if(MIGRAPHX_ENABLE_GPU)
# NOTE(review): old-signature duplicates again — presumed diff residue.
add_py_test(gpu_offload test_gpu_offload.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(gpu test_gpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(array test_array.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(backend onnx_backend_test.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(gpu_async test_gpu_async.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
# GPU tests; the onnx backend suite uses its own heavier venv fixture.
add_py_test(gpu_offload test_gpu_offload.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(gpu test_gpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(array test_array.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(backend onnx_backend_test.py onnx ${VENV_ONNX} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(gpu_async test_gpu_async.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
endif()
......@@ -64,48 +64,1033 @@ class MIGraphXBackendTest(onnx.backend.test.BackendTest):
def disabled_tests_onnx_1_7_0(backend_test):
    """Exclude backend tests known to fail with onnx 1.7.0.

    Each entry is a regex matched by onnx.backend.test against the
    generated test names. Patterns are kept in a flat tuple (original
    order preserved) so additions are one-line diffs; the source had been
    whitespace-mangled, so the calls are also re-indented correctly.
    """
    patterns = (
        # fails
        # from OnnxBackendNodeModelTest
        r'test_logsoftmax_axis_0_cpu',
        r'test_logsoftmax_axis_1_cpu',
        r'test_logsoftmax_default_axis_cpu',
        r'test_maxpool_2d_dilations_cpu',
        r'test_maxpool_with_argmax_2d_precomputed_pads_cpu',
        r'test_maxpool_with_argmax_2d_precomputed_strides_cpu',
        r'test_nonmaxsuppression_center_point_box_format_cpu',
        r'test_nonmaxsuppression_flipped_coordinates_cpu',
        r'test_nonmaxsuppression_identical_boxes_cpu',
        r'test_nonmaxsuppression_limit_output_size_cpu',
        r'test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu',
        r'test_nonmaxsuppression_suppress_by_IOU_cpu',
        r'test_nonmaxsuppression_two_batches_cpu',
        r'test_nonmaxsuppression_two_classes_cpu',
        r'test_nonzero_example_cpu',
        r'test_softmax_axis_0_cpu',
        r'test_softmax_axis_1_cpu',
        r'test_softmax_default_axis_cpu',
        # from OnnxBackendPyTorchConvertedModelTest
        r'test_ConvTranspose2d_cpu',
        r'test_ConvTranspose2d_no_bias_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_8_1(backend_test):
    """Exclude backend tests known to fail or error with onnx 1.8.1.

    Each entry is a regex matched by onnx.backend.test against the
    generated test names. Patterns are kept in a flat tuple (original
    order preserved) so additions are one-line diffs; the source had been
    whitespace-mangled, so the calls are also re-indented correctly.
    """
    patterns = (
        # fails
        r'test_if_seq_cpu',
        # from OnnxBackendPyTorchOperatorModelTest
        r'test_operator_add_broadcast_cpu',
        r'test_operator_add_size1_right_broadcast_cpu',
        r'test_operator_addconstant_cpu',
        r'test_operator_convtranspose_cpu',
        # errors
        # from OnnxBackendNodeModelTest
        r'test_bitshift_left_uint16_cpu', r'test_bitshift_left_uint32_cpu',
        r'test_bitshift_left_uint64_cpu', r'test_bitshift_left_uint8_cpu',
        r'test_bitshift_right_uint16_cpu', r'test_bitshift_right_uint32_cpu',
        r'test_bitshift_right_uint64_cpu', r'test_bitshift_right_uint8_cpu',
        r'test_cast_FLOAT_to_STRING_cpu', r'test_cast_STRING_to_FLOAT_cpu',
        r'test_compress_0_cpu', r'test_compress_1_cpu',
        r'test_compress_default_axis_cpu', r'test_compress_negative_axis_cpu',
        r'test_constant_pad_cpu', r'test_convinteger_with_padding_cpu',
        r'test_convtranspose_1d_cpu', r'test_det_2d_cpu', r'test_det_nd_cpu',
        r'test_dynamicquantizelinear_cpu',
        r'test_dynamicquantizelinear_max_adjusted_cpu',
        r'test_dynamicquantizelinear_min_adjusted_cpu',
        r'test_edge_pad_cpu',
        r'test_einsum_batch_diagonal_cpu', r'test_einsum_batch_matmul_cpu',
        r'test_einsum_inner_prod_cpu', r'test_einsum_sum_cpu',
        r'test_einsum_transpose_cpu',
        r'test_hardmax_axis_0_cpu', r'test_hardmax_axis_1_cpu',
        r'test_hardmax_axis_2_cpu', r'test_hardmax_default_axis_cpu',
        r'test_hardmax_example_cpu', r'test_hardmax_negative_axis_cpu',
        r'test_hardmax_one_hot_cpu',
        r'test_matmulinteger_cpu', r'test_maxpool_2d_uint8_cpu',
        r'test_maxunpool_export_with_output_shape_cpu',
        r'test_maxunpool_export_without_output_shape_cpu',
        r'test_mod_mixed_sign_int32_cpu', r'test_mod_mixed_sign_int8_cpu',
        # NOTE: the 'iinput' spelling below reproduces the upstream test
        # name verbatim; do not "fix" it.
        r'test_negative_log_likelihood_loss_iinput_shape_is_NCd1_weight_ignore_index_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NC_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1_ignore_index_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1_mean_weight_negative_ignore_index_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1_weight_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_no_weight_reduction_mean_ignore_index_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_reduction_mean_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_reduction_sum_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_mean_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_sum_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_sum_ignore_index_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3d4d5_mean_weight_cpu',
        r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3d4d5_none_no_weight_cpu',
        r'test_qlinearconv_cpu', r'test_qlinearmatmul_2D_cpu',
        r'test_qlinearmatmul_3D_cpu',
        r'test_range_float_type_positive_delta_expanded_cpu',
        r'test_range_int32_type_negative_delta_expanded_cpu',
        r'test_reflect_pad_cpu',
        r'test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu',
        r'test_resize_downsample_scales_cubic_align_corners_cpu',
        r'test_resize_downsample_scales_cubic_cpu',
        r'test_resize_downsample_scales_linear_align_corners_cpu',
        r'test_resize_downsample_scales_linear_cpu',
        r'test_resize_downsample_scales_nearest_cpu',
        r'test_resize_downsample_sizes_cubic_cpu',
        r'test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu',
        r'test_resize_downsample_sizes_nearest_cpu',
        r'test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu',
        r'test_resize_tf_crop_and_resize_cpu',
        r'test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu',
        r'test_resize_upsample_scales_cubic_align_corners_cpu',
        r'test_resize_upsample_scales_cubic_asymmetric_cpu',
        r'test_resize_upsample_scales_cubic_cpu',
        r'test_resize_upsample_scales_linear_align_corners_cpu',
        r'test_resize_upsample_scales_linear_cpu',
        r'test_resize_upsample_scales_nearest_cpu',
        r'test_resize_upsample_sizes_cubic_cpu',
        r'test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu',
        r'test_resize_upsample_sizes_nearest_cpu',
        r'test_resize_upsample_sizes_nearest_floor_align_corners_cpu',
        r'test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu',
        r'test_reversesequence_batch_cpu', r'test_reversesequence_time_cpu',
        r'test_scan9_sum_cpu', r'test_scan_sum_cpu',
        r'test_slice_cpu', r'test_slice_default_axes_cpu',
        r'test_slice_default_steps_cpu', r'test_slice_end_out_of_bounds_cpu',
        r'test_slice_neg_cpu', r'test_slice_neg_steps_cpu',
        r'test_slice_negative_axes_cpu', r'test_slice_start_out_of_bounds_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1_mean_weight_negative_ignore_index_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1_mean_weight_negative_ignore_index_expanded_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1_mean_weight_negative_ignore_index_log_prob_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1_mean_weight_negative_ignore_index_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_expanded_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_log_prob_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_expanded_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_log_prob_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_mean_weight_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_mean_weight_expanded_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_mean_weight_log_prob_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_mean_weight_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_none_no_weight_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_none_no_weight_expanded_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_none_no_weight_log_prob_cpu',
        r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_none_no_weight_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_mean_3d_cpu',
        r'test_softmax_cross_entropy_mean_3d_expanded_cpu',
        r'test_softmax_cross_entropy_mean_3d_log_prob_cpu',
        r'test_softmax_cross_entropy_mean_3d_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_mean_cpu',
        r'test_softmax_cross_entropy_mean_expanded_cpu',
        r'test_softmax_cross_entropy_mean_log_prob_cpu',
        r'test_softmax_cross_entropy_mean_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_3d_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_3d_expanded_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_3d_log_prob_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_3d_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_4d_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_4d_expanded_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_4d_log_prob_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_4d_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_expanded_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_log_prob_cpu',
        r'test_softmax_cross_entropy_mean_no_weight_ignore_index_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_mean_weight_cpu',
        r'test_softmax_cross_entropy_mean_weight_expanded_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_3d_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_3d_expanded_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_3d_log_prob_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_3d_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_4d_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_4d_expanded_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_4d_log_prob_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_4d_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_expanded_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_log_prob_cpu',
        r'test_softmax_cross_entropy_mean_weight_ignore_index_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_mean_weight_log_prob_cpu',
        r'test_softmax_cross_entropy_mean_weight_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_none_cpu',
        r'test_softmax_cross_entropy_none_expanded_cpu',
        r'test_softmax_cross_entropy_none_log_prob_cpu',
        r'test_softmax_cross_entropy_none_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_none_weights_cpu',
        r'test_softmax_cross_entropy_none_weights_expanded_cpu',
        r'test_softmax_cross_entropy_none_weights_log_prob_cpu',
        r'test_softmax_cross_entropy_none_weights_log_prob_expanded_cpu',
        r'test_softmax_cross_entropy_sum_cpu',
        r'test_softmax_cross_entropy_sum_expanded_cpu',
        r'test_softmax_cross_entropy_sum_log_prob_cpu',
        r'test_softmax_cross_entropy_sum_log_prob_expanded_cpu',
        r'test_split_zero_size_splits_cpu',
        r'test_strnormalizer_export_monday_casesensintive_lower_cpu',
        r'test_strnormalizer_export_monday_casesensintive_nochangecase_cpu',
        r'test_strnormalizer_export_monday_casesensintive_upper_cpu',
        r'test_strnormalizer_export_monday_empty_output_cpu',
        r'test_strnormalizer_export_monday_insensintive_upper_twodim_cpu',
        r'test_strnormalizer_nostopwords_nochangecase_cpu',
        r'test_tfidfvectorizer_tf_batch_onlybigrams_skip0_cpu',
        r'test_tfidfvectorizer_tf_batch_onlybigrams_skip5_cpu',
        r'test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu',
        r'test_tfidfvectorizer_tf_only_bigrams_skip0_cpu',
        r'test_tfidfvectorizer_tf_onlybigrams_levelempty_cpu',
        r'test_tfidfvectorizer_tf_onlybigrams_skip5_cpu',
        r'test_tfidfvectorizer_tf_uniandbigrams_skip5_cpu',
        r'test_top_k_cpu', r'test_top_k_negative_axis_cpu',
        r'test_top_k_smallest_cpu',
        r'test_unique_not_sorted_without_axis_cpu',
        r'test_unique_sorted_with_axis_3d_cpu',
        r'test_unique_sorted_with_axis_cpu',
        r'test_unique_sorted_with_negative_axis_cpu',
        r'test_unique_sorted_without_axis_cpu',
        r'test_upsample_nearest_cpu',
        # from OnnxBackendPyTorchConvertedModelTest
        r'test_PReLU_1d_multiparam_cpu', r'test_PReLU_2d_multiparam_cpu',
        r'test_PReLU_3d_multiparam_cpu', r'test_ReplicationPad2d_cpu',
        # from OnnxBackendPyTorchOperatorModelTest
        r'test_operator_add_size1_broadcast_cpu',
        r'test_operator_add_size1_singleton_broadcast_cpu',
        # from OnnxBackendSimpleModelTest
        r'test_gradient_of_add_and_mul_cpu', r'test_gradient_of_add_cpu',
        r'test_sequence_model1_cpu', r'test_sequence_model2_cpu',
        r'test_sequence_model3_cpu', r'test_sequence_model4_cpu',
        r'test_sequence_model5_cpu', r'test_sequence_model6_cpu',
        r'test_sequence_model7_cpu', r'test_sequence_model8_cpu',
        r'test_strnorm_model_monday_casesensintive_lower_cpu',
        r'test_strnorm_model_monday_casesensintive_nochangecase_cpu',
        r'test_strnorm_model_monday_casesensintive_upper_cpu',
        r'test_strnorm_model_monday_empty_output_cpu',
        r'test_strnorm_model_monday_insensintive_upper_twodim_cpu',
        r'test_strnorm_model_nostopwords_nochangecase_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_8_0(backend_test):
    """Exclude backend tests known to error with onnx 1.8.0.

    Each entry is a regex matched by onnx.backend.test against the
    generated test names. Patterns are kept in a flat tuple (original
    order preserved) so additions are one-line diffs; the source had been
    whitespace-mangled, so the calls are also re-indented correctly.
    """
    patterns = (
        # errors
        # from OnnxBackendNodeModelTest
        r'test_cast_BFLOAT16_to_FLOAT_cpu', r'test_cast_FLOAT_to_BFLOAT16_cpu',
        r'test_if_seq_cpu', r'test_loop11_cpu', r'test_loop13_seq_cpu',
        r'test_nllloss_NC_cpu', r'test_nllloss_NCd1_cpu',
        r'test_nllloss_NCd1_ii_cpu',
        r'test_nllloss_NCd1_mean_weight_negative_ii_cpu',
        r'test_nllloss_NCd1_weight_cpu', r'test_nllloss_NCd1_weight_ii_cpu',
        r'test_nllloss_NCd1d2_cpu',
        r'test_nllloss_NCd1d2_no_weight_reduction_mean_ii_cpu',
        r'test_nllloss_NCd1d2_reduction_mean_cpu',
        r'test_nllloss_NCd1d2_reduction_sum_cpu',
        r'test_nllloss_NCd1d2_with_weight_cpu',
        r'test_nllloss_NCd1d2_with_weight_reduction_mean_cpu',
        r'test_nllloss_NCd1d2_with_weight_reduction_sum_cpu',
        r'test_nllloss_NCd1d2_with_weight_reduction_sum_ii_cpu',
        r'test_nllloss_NCd1d2d3_none_no_weight_negative_ii_cpu',
        r'test_nllloss_NCd1d2d3_sum_weight_high_ii_cpu',
        r'test_nllloss_NCd1d2d3d4d5_mean_weight_cpu',
        r'test_nllloss_NCd1d2d3d4d5_none_no_weight_cpu',
        r'test_reduce_sum_empty_axes_input_noop_random_cpu',
        r'test_sce_NCd1_mean_weight_negative_ii_cpu',
        r'test_sce_NCd1_mean_weight_negative_ii_expanded_cpu',
        r'test_sce_NCd1_mean_weight_negative_ii_log_prob_cpu',
        r'test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded_cpu',
        r'test_sce_NCd1d2d3_none_no_weight_negative_ii_cpu',
        r'test_sce_NCd1d2d3_none_no_weight_negative_ii_expanded_cpu',
        r'test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_cpu',
        r'test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_expanded_cpu',
        r'test_sce_NCd1d2d3_sum_weight_high_ii_cpu',
        r'test_sce_NCd1d2d3_sum_weight_high_ii_expanded_cpu',
        r'test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_cpu',
        r'test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_expanded_cpu',
        r'test_sce_NCd1d2d3d4d5_mean_weight_cpu',
        r'test_sce_NCd1d2d3d4d5_mean_weight_expanded_cpu',
        r'test_sce_NCd1d2d3d4d5_mean_weight_log_prob_cpu',
        r'test_sce_NCd1d2d3d4d5_mean_weight_log_prob_expanded_cpu',
        r'test_sce_NCd1d2d3d4d5_none_no_weight_cpu',
        r'test_sce_NCd1d2d3d4d5_none_no_weight_expanded_cpu',
        r'test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_cpu',
        r'test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_expanded_cpu',
        r'test_sce_mean_3d_cpu', r'test_sce_mean_3d_expanded_cpu',
        r'test_sce_mean_3d_log_prob_cpu',
        r'test_sce_mean_3d_log_prob_expanded_cpu',
        r'test_sce_mean_cpu', r'test_sce_mean_expanded_cpu',
        r'test_sce_mean_log_prob_cpu', r'test_sce_mean_log_prob_expanded_cpu',
        r'test_sce_mean_no_weight_ii_3d_cpu',
        r'test_sce_mean_no_weight_ii_3d_expanded_cpu',
        r'test_sce_mean_no_weight_ii_3d_log_prob_cpu',
        r'test_sce_mean_no_weight_ii_3d_log_prob_expanded_cpu',
        r'test_sce_mean_no_weight_ii_4d_cpu',
        r'test_sce_mean_no_weight_ii_4d_expanded_cpu',
        r'test_sce_mean_no_weight_ii_4d_log_prob_cpu',
        r'test_sce_mean_no_weight_ii_4d_log_prob_expanded_cpu',
        r'test_sce_mean_no_weight_ii_cpu',
        r'test_sce_mean_no_weight_ii_expanded_cpu',
        r'test_sce_mean_no_weight_ii_log_prob_cpu',
        r'test_sce_mean_no_weight_ii_log_prob_expanded_cpu',
        r'test_sce_mean_weight_cpu', r'test_sce_mean_weight_expanded_cpu',
        r'test_sce_mean_weight_ii_3d_cpu',
        r'test_sce_mean_weight_ii_3d_expanded_cpu',
        r'test_sce_mean_weight_ii_3d_log_prob_cpu',
        r'test_sce_mean_weight_ii_3d_log_prob_expanded_cpu',
        r'test_sce_mean_weight_ii_4d_cpu',
        r'test_sce_mean_weight_ii_4d_expanded_cpu',
        r'test_sce_mean_weight_ii_4d_log_prob_cpu',
        r'test_sce_mean_weight_ii_4d_log_prob_expanded_cpu',
        r'test_sce_mean_weight_ii_cpu', r'test_sce_mean_weight_ii_expanded_cpu',
        r'test_sce_mean_weight_ii_log_prob_cpu',
        r'test_sce_mean_weight_ii_log_prob_expanded_cpu',
        r'test_sce_mean_weight_log_prob_cpu',
        r'test_sce_mean_weight_log_prob_expanded_cpu',
        r'test_sce_none_cpu', r'test_sce_none_expanded_cpu',
        r'test_sce_none_log_prob_cpu', r'test_sce_none_log_prob_expanded_cpu',
        r'test_sce_none_weights_cpu', r'test_sce_none_weights_expanded_cpu',
        r'test_sce_none_weights_log_prob_cpu',
        r'test_sce_none_weights_log_prob_expanded_cpu',
        r'test_sce_sum_cpu', r'test_sce_sum_expanded_cpu',
        r'test_sce_sum_log_prob_cpu', r'test_sce_sum_log_prob_expanded_cpu',
        r'test_sequence_insert_at_back_cpu',
        r'test_sequence_insert_at_front_cpu',
        r'test_split_variable_parts_1d_cpu',
        r'test_split_variable_parts_2d_cpu',
        r'test_split_variable_parts_default_axis_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_9_0(backend_test):
    """Exclude backend tests known to fail or error with onnx 1.9.0.

    Each entry is a regex matched by onnx.backend.test against the
    generated test names. Patterns are kept in a flat tuple (original
    order preserved) so additions are one-line diffs; the source had been
    whitespace-mangled, so the calls are also re-indented correctly.
    """
    patterns = (
        # fails
        # from OnnxBackendNodeModelTest
        r'test_gru_batchwise_cpu',
        r'test_lstm_batchwise_cpu',
        r'test_simple_rnn_batchwise_cpu',
        # from OnnxBackendPyTorchConvertedModelTest
        r'test_MaxPool1d_stride_padding_dilation_cpu',
        r'test_MaxPool2d_stride_padding_dilation_cpu',
        # errors
        # from OnnxBackendNodeModelTest
        r'test_convinteger_without_padding_cpu',
        r'test_convtranspose_autopad_same_cpu',
        r'test_identity_sequence_cpu',
        r'test_tril_neg_cpu', r'test_tril_out_neg_cpu',
        r'test_tril_out_pos_cpu', r'test_tril_pos_cpu',
        r'test_tril_square_neg_cpu', r'test_tril_zero_cpu',
        r'test_triu_neg_cpu', r'test_triu_one_row_cpu',
        r'test_triu_out_neg_out_cpu', r'test_triu_out_pos_cpu',
        r'test_triu_pos_cpu', r'test_triu_square_neg_cpu',
        r'test_triu_zero_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_10_0(backend_test):
    """Exclude ONNX 1.10.0 backend tests that fail or error on this backend.

    NOTE(review): a second ``disabled_tests_onnx_1_10_0`` appears later in
    this file (manual-merge artifact); Python's later definition shadows
    this one -- confirm which set of exclusions is intended.
    """
    patterns = (
        # fails -- from OnnxBackendNodeModelTest
        r'test_bernoulli_double_expanded_cpu',
        r'test_bernoulli_expanded_cpu',
        r'test_bernoulli_seed_expanded_cpu',
        # errors -- from OnnxBackendNodeModelTest
        r'test_bernoulli_cpu',
        r'test_bernoulli_double_cpu',
        r'test_bernoulli_seed_cpu',
        r'test_castlike_BFLOAT16_to_FLOAT_cpu',
        r'test_castlike_BFLOAT16_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT_to_BFLOAT16_cpu',
        r'test_castlike_FLOAT_to_BFLOAT16_expanded_cpu',
        r'test_castlike_FLOAT_to_STRING_cpu',
        r'test_castlike_FLOAT_to_STRING_expanded_cpu',
        r'test_castlike_STRING_to_FLOAT_cpu',
        r'test_castlike_STRING_to_FLOAT_expanded_cpu',
        r'test_optional_get_element_cpu',
        r'test_optional_get_element_sequence_cpu',
        r'test_optional_has_element_cpu',
        r'test_optional_has_element_empty_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_11_0(backend_test):
    """Exclude ONNX 1.11.0 backend tests that crash, fail, or error."""
    patterns = (
        # crash
        r'test_scatter_elements_with_duplicate_indices_cpu',
        # fails -- from OnnxBackendNodeModelTest
        r'test_roialign_aligned_false_cpu',
        r'test_roialign_aligned_true_cpu',
        r'test_scatternd_add_cpu',
        r'test_scatternd_multiply_cpu',
        # errors -- from OnnxBackendNodeModelTest
        r'test_gridsample_aligncorners_true_cpu',
        r'test_gridsample_bicubic_cpu',
        r'test_gridsample_bilinear_cpu',
        r'test_gridsample_border_padding_cpu',
        r'test_gridsample_cpu',
        r'test_gridsample_nearest_cpu',
        r'test_gridsample_reflection_padding_cpu',
        r'test_gridsample_zeros_padding_cpu',
        r'test_identity_opt_cpu',
        r'test_if_opt_cpu',
        r'test_loop16_seq_none_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_12_0(backend_test):
    """Exclude ONNX 1.12.0 backend tests that error on this backend.

    NOTE(review): a second ``disabled_tests_onnx_1_12_0`` appears later in
    this file (manual-merge artifact); Python's later definition shadows
    this one -- confirm which set of exclusions is intended.
    """
    patterns = (
        # errors -- from OnnxBackendNodeModelTest
        r'test_blackmanwindow_cpu',
        r'test_blackmanwindow_expanded_cpu',
        r'test_blackmanwindow_symmetric_cpu',
        r'test_blackmanwindow_symmetric_expanded_cpu',
        r'test_dft_axis_cpu',
        r'test_dft_cpu',
        r'test_dft_inverse_cpu',
        r'test_hammingwindow_cpu',
        r'test_hammingwindow_expanded_cpu',
        r'test_hammingwindow_symmetric_cpu',
        r'test_hammingwindow_symmetric_expanded_cpu',
        r'test_hannwindow_cpu',
        r'test_hannwindow_expanded_cpu',
        r'test_hannwindow_symmetric_cpu',
        r'test_hannwindow_symmetric_expanded_cpu',
        r'test_layer_normalization_2d_axis0_cpu',
        r'test_layer_normalization_2d_axis1_cpu',
        r'test_layer_normalization_2d_axis_negative_1_cpu',
        r'test_layer_normalization_2d_axis_negative_2_cpu',
        r'test_layer_normalization_3d_axis0_epsilon_cpu',
        r'test_layer_normalization_3d_axis1_epsilon_cpu',
        r'test_layer_normalization_3d_axis2_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_1_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_2_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_3_epsilon_cpu',
        r'test_layer_normalization_4d_axis0_cpu',
        r'test_layer_normalization_4d_axis1_cpu',
        r'test_layer_normalization_4d_axis2_cpu',
        r'test_layer_normalization_4d_axis3_cpu',
        r'test_layer_normalization_4d_axis_negative_1_cpu',
        r'test_layer_normalization_4d_axis_negative_2_cpu',
        r'test_layer_normalization_4d_axis_negative_3_cpu',
        r'test_layer_normalization_4d_axis_negative_4_cpu',
        r'test_layer_normalization_default_axis_cpu',
        r'test_melweightmatrix_cpu',
        r'test_sequence_map_add_1_sequence_1_tensor_cpu',
        r'test_sequence_map_add_1_sequence_1_tensor_expanded_cpu',
        r'test_sequence_map_add_2_sequences_cpu',
        r'test_sequence_map_add_2_sequences_expanded_cpu',
        r'test_sequence_map_extract_shapes_cpu',
        r'test_sequence_map_extract_shapes_expanded_cpu',
        r'test_sequence_map_identity_1_sequence_1_tensor_cpu',
        r'test_sequence_map_identity_1_sequence_1_tensor_expanded_cpu',
        r'test_sequence_map_identity_1_sequence_cpu',
        r'test_sequence_map_identity_1_sequence_expanded_cpu',
        r'test_sequence_map_identity_2_sequences_cpu',
        r'test_sequence_map_identity_2_sequences_expanded_cpu',
        r'test_stft_cpu',
        r'test_stft_with_window_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_13_0(backend_test):
    """Exclude ONNX 1.13.0 backend tests that fail or error on this backend."""
    patterns = (
        # fails -- from OnnxBackendNodeModelTest
        r'test_reduce_l1_do_not_keepdims_example_cpu',
        r'test_reduce_l1_do_not_keepdims_random_cpu',
        r'test_reduce_l1_keep_dims_example_cpu',
        r'test_reduce_l1_keep_dims_random_cpu',
        r'test_reduce_l1_negative_axes_keep_dims_example_cpu',
        r'test_reduce_l1_negative_axes_keep_dims_random_cpu',
        r'test_reduce_l2_do_not_keepdims_example_cpu',
        r'test_reduce_l2_do_not_keepdims_random_cpu',
        r'test_reduce_l2_keep_dims_example_cpu',
        r'test_reduce_l2_keep_dims_random_cpu',
        r'test_reduce_l2_negative_axes_keep_dims_example_cpu',
        r'test_reduce_l2_negative_axes_keep_dims_random_cpu',
        r'test_reduce_log_sum_exp_do_not_keepdims_example_cpu',
        r'test_reduce_log_sum_exp_do_not_keepdims_random_cpu',
        r'test_reduce_log_sum_exp_keepdims_example_cpu',
        r'test_reduce_log_sum_exp_keepdims_random_cpu',
        r'test_reduce_log_sum_exp_negative_axes_keepdims_example_cpu',
        r'test_reduce_log_sum_exp_negative_axes_keepdims_random_cpu',
        r'test_reduce_sum_square_do_not_keepdims_example_cpu',
        r'test_reduce_sum_square_do_not_keepdims_random_cpu',
        r'test_reduce_sum_square_keepdims_example_cpu',
        r'test_reduce_sum_square_keepdims_random_cpu',
        r'test_reduce_sum_square_negative_axes_keepdims_example_cpu',
        r'test_reduce_sum_square_negative_axes_keepdims_random_cpu',
        r'test_scatternd_max_cpu',
        r'test_scatternd_min_cpu',
        # errors -- from OnnxBackendNodeModelTest
        r'test_bitwise_and_i16_3d_cpu',
        r'test_bitwise_and_i32_2d_cpu',
        r'test_bitwise_and_ui64_bcast_3v1d_cpu',
        r'test_bitwise_and_ui8_bcast_4v3d_cpu',
        r'test_bitwise_not_2d_cpu',
        r'test_bitwise_not_3d_cpu',
        r'test_bitwise_not_4d_cpu',
        r'test_bitwise_or_i16_4d_cpu',
        r'test_bitwise_or_i32_2d_cpu',
        r'test_bitwise_or_ui64_bcast_3v1d_cpu',
        r'test_bitwise_or_ui8_bcast_4v3d_cpu',
        r'test_bitwise_xor_i16_3d_cpu',
        r'test_bitwise_xor_i32_2d_cpu',
        r'test_bitwise_xor_ui64_bcast_3v1d_cpu',
        r'test_bitwise_xor_ui8_bcast_4v3d_cpu',
        r'test_center_crop_pad_crop_and_pad_cpu',
        r'test_center_crop_pad_crop_and_pad_expanded_cpu',
        r'test_center_crop_pad_crop_axes_chw_cpu',
        r'test_center_crop_pad_crop_axes_chw_expanded_cpu',
        r'test_center_crop_pad_crop_axes_hwc_cpu',
        r'test_center_crop_pad_crop_axes_hwc_expanded_cpu',
        r'test_center_crop_pad_crop_cpu',
        r'test_center_crop_pad_crop_expanded_cpu',
        r'test_center_crop_pad_pad_cpu',
        r'test_center_crop_pad_pad_expanded_cpu',
        r'test_col2im_5d_cpu',
        r'test_col2im_cpu',
        r'test_col2im_dilations_cpu',
        r'test_col2im_pads_cpu',
        r'test_col2im_strides_cpu',
        r'test_constant_pad_axes_cpu',
        r'test_group_normalization_epsilon_cpu',
        r'test_group_normalization_epsilon_expanded_cpu',
        r'test_group_normalization_example_cpu',
        r'test_group_normalization_example_expanded_cpu',
        r'test_mish_cpu',
        r'test_optional_get_element_optional_sequence_cpu',
        r'test_optional_get_element_optional_tensor_cpu',
        r'test_optional_get_element_tensor_cpu',
        r'test_optional_has_element_empty_no_input_name_optional_input_cpu',
        r'test_optional_has_element_empty_no_input_name_tensor_input_cpu',
        r'test_optional_has_element_empty_no_input_optional_input_cpu',
        r'test_optional_has_element_empty_no_input_tensor_input_cpu',
        r'test_optional_has_element_empty_optional_input_cpu',
        r'test_optional_has_element_optional_input_cpu',
        r'test_optional_has_element_tensor_input_cpu',
        r'test_prelu_broadcast_expanded_cpu',
        r'test_prelu_example_expanded_cpu',
        r'test_reduce_l1_default_axes_keepdims_example_cpu',
        r'test_reduce_l1_default_axes_keepdims_random_cpu',
        r'test_reduce_l2_default_axes_keepdims_example_cpu',
        r'test_reduce_l2_default_axes_keepdims_random_cpu',
        r'test_reduce_log_sum_exp_default_axes_keepdims_example_cpu',
        r'test_reduce_log_sum_exp_default_axes_keepdims_random_cpu',
        r'test_reduce_sum_square_default_axes_keepdims_example_cpu',
        r'test_reduce_sum_square_default_axes_keepdims_random_cpu',
        r'test_resize_downsample_scales_cubic_antialias_cpu',
        r'test_resize_downsample_scales_linear_antialias_cpu',
        r'test_resize_downsample_sizes_cubic_antialias_cpu',
        r'test_resize_downsample_sizes_linear_antialias_cpu',
        r'test_resize_downsample_sizes_nearest_not_larger_cpu',
        r'test_resize_downsample_sizes_nearest_not_smaller_cpu',
        r'test_resize_tf_crop_and_resize_axes_2_3_cpu',
        r'test_resize_tf_crop_and_resize_axes_3_2_cpu',
        r'test_resize_upsample_scales_nearest_axes_2_3_cpu',
        r'test_resize_upsample_scales_nearest_axes_3_2_cpu',
        r'test_resize_upsample_sizes_nearest_axes_2_3_cpu',
        r'test_resize_upsample_sizes_nearest_axes_3_2_cpu',
        r'test_resize_upsample_sizes_nearest_not_larger_cpu',
        r'test_scatter_elements_with_reduction_max_cpu',
        r'test_scatter_elements_with_reduction_min_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_14_0(backend_test):
    """Exclude ONNX 1.14.0 backend tests that fail or error on this backend."""
    patterns = (
        # fails -- from OnnxBackendNodeModelTest
        r'test_averagepool_2d_dilations_cpu',
        r'test_roialign_mode_max_cpu',
        # errors -- from OnnxBackendNodeModelTest
        r'test_basic_deform_conv_with_padding_cpu',
        r'test_basic_deform_conv_without_padding_cpu',
        r'test_center_crop_pad_crop_negative_axes_hwc_cpu',
        r'test_center_crop_pad_crop_negative_axes_hwc_expanded_cpu',
        r'test_constant_pad_negative_axes_cpu',
        r'test_deform_conv_with_mask_bias_cpu',
        r'test_deform_conv_with_multiple_offset_groups_cpu',
        r'test_equal_string_broadcast_cpu',
        r'test_equal_string_cpu',
        r'test_lppool_1d_default_cpu',
        r'test_lppool_2d_default_cpu',
        r'test_lppool_2d_dilations_cpu',
        r'test_lppool_2d_pads_cpu',
        r'test_lppool_2d_same_lower_cpu',
        r'test_lppool_2d_same_upper_cpu',
        r'test_lppool_2d_strides_cpu',
        r'test_lppool_3d_default_cpu',
        r'test_resize_downsample_scales_linear_half_pixel_symmetric_cpu',
        r'test_resize_upsample_scales_linear_half_pixel_symmetric_cpu',
        # The following tests fail due to the CastLike operator being unsupported
        r'test_softplus_example_expanded_ver18_cpu',
        r'test_softplus_expanded_ver18_cpu',
        r'test_split_to_sequence_1_cpu',
        r'test_split_to_sequence_2_cpu',
        r'test_split_to_sequence_nokeepdims_cpu',
        r'test_wrap_pad_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_float8(backend_test):
    """Exclude backend tests for float8 datatypes the parser does not support."""
    patterns = (
        # e4m3fn (Prototensor data type 17 not supported)
        r'test_dequantizelinear_e4m3fn_cpu',
        r'test_quantizelinear_e4m3fn_cpu',
        r'test_cast_FLOAT16_to_FLOAT8E4M3FN_cpu',
        r'test_cast_FLOAT8E4M3FN_to_FLOAT16_cpu',
        r'test_cast_FLOAT8E4M3FN_to_FLOAT_cpu',
        r'test_cast_FLOAT_to_FLOAT8E4M3FN_cpu',
        r'test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN_cpu',
        r'test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E4M3FN_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded_cpu',
        # e4m3fnuz (Prototensor data type 18 not supported)
        r'test_cast_FLOAT16_to_FLOAT8E4M3FNUZ_cpu',
        r'test_cast_FLOAT8E4M3FNUZ_to_FLOAT16_cpu',
        r'test_cast_FLOAT8E4M3FNUZ_to_FLOAT_cpu',
        r'test_cast_FLOAT_to_FLOAT8E4M3FNUZ_cpu',
        r'test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ_cpu',
        r'test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ_cpu',
        r'test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_cpu',
        r'test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT8E4M3FN_to_FLOAT_cpu',
        r'test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded_cpu',
        # e5m2 ( Prototensor data type 19 not supported )
        r'test_dequantizelinear_e5m2_cpu',
        r'test_quantizelinear_e5m2_cpu',
        r'test_cast_FLOAT16_to_FLOAT8E5M2_cpu',
        r'test_cast_FLOAT8E5M2_to_FLOAT16_cpu',
        r'test_cast_FLOAT8E5M2_to_FLOAT_cpu',
        r'test_cast_FLOAT_to_FLOAT8E5M2_cpu',
        r'test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2_cpu',
        r'test_cast_no_saturate_FLOAT_to_FLOAT8E5M2_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E5M2_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E5M2_expanded_cpu',
        # e5m2fnuz (Prototensor data type 20 not supported)
        r'test_cast_FLOAT16_to_FLOAT8E5M2FNUZ_cpu',
        r'test_cast_FLOAT8E5M2FNUZ_to_FLOAT16_cpu',
        r'test_cast_FLOAT8E5M2FNUZ_to_FLOAT_cpu',
        r'test_cast_FLOAT_to_FLOAT8E5M2FNUZ_cpu',
        r'test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ_cpu',
        r'test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ_cpu',
        r'test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_cpu',
        r'test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT8E5M2_to_FLOAT_cpu',
        r'test_castlike_FLOAT8E5M2_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_dynamic_shape(backend_test):
    """Exclude backend tests that are incompatible with dynamic-shape mode.

    NOTE(review): the unsqueeze patterns are listed twice in the original
    (once after squeeze, once at the end); the duplicate excludes are kept
    here to preserve the exact call sequence.
    """
    patterns = (
        # constantofshape
        r'test_constantofshape_float_ones_cpu',
        r'test_constantofshape_int_shape_zero_cpu',
        r'test_constantofshape_int_zeros_cpu',
        # cumsum
        r'test_cumsum_1d_cpu',
        r'test_cumsum_1d_exclusive_cpu',
        r'test_cumsum_1d_reverse_cpu',
        r'test_cumsum_1d_reverse_exclusive_cpu',
        r'test_cumsum_2d_axis_0_cpu',
        r'test_cumsum_2d_axis_1_cpu',
        r'test_cumsum_2d_negative_axis_cpu',
        # expand
        r'test_expand_dim_changed_cpu',
        r'test_expand_dim_unchanged_cpu',
        r'test_expand_shape_model1_cpu',
        r'test_expand_shape_model2_cpu',
        r'test_expand_shape_model3_cpu',
        r'test_expand_shape_model4_cpu',
        # onehot
        r'test_onehot_negative_indices_cpu',
        r'test_onehot_with_axis_cpu',
        r'test_onehot_with_negative_axis_cpu',
        r'test_onehot_without_axis_cpu',
        # range
        r'test_range_float_type_positive_delta_cpu',
        r'test_range_int32_type_negative_delta_cpu',
        # split
        r'test_split_variable_parts_1d_opset13_cpu',
        r'test_split_variable_parts_1d_opset18_cpu',
        r'test_split_variable_parts_2d_opset13_cpu',
        r'test_split_variable_parts_2d_opset18_cpu',
        r'test_split_variable_parts_default_axis_opset13_cpu',
        r'test_split_variable_parts_default_axis_opset18_cpu',
        r'test_split_zero_size_splits_opset13_cpu',
        r'test_split_zero_size_splits_opset18_cpu',
        # squeeze
        r'test_squeeze_cpu',
        r'test_squeeze_negative_axes_cpu',
        # unsqueeze
        r'test_unsqueeze_axis_0_cpu',
        r'test_unsqueeze_axis_1_cpu',
        r'test_unsqueeze_axis_2_cpu',
        r'test_unsqueeze_negative_axes_cpu',
        r'test_unsqueeze_three_axes_cpu',
        r'test_unsqueeze_two_axes_cpu',
        r'test_unsqueeze_unsorted_axes_cpu',
        # tile
        r'test_tile_cpu',
        r'test_tile_precomputed_cpu',
        # reshape
        r'test_reshape_allowzero_reordered_cpu',
        r'test_reshape_extended_dims_cpu',
        r'test_reshape_negative_dim_cpu',
        r'test_reshape_negative_extended_dims_cpu',
        r'test_reshape_one_dim_cpu',
        r'test_reshape_reduced_dims_cpu',
        r'test_reshape_reordered_all_dims_cpu',
        r'test_reshape_reordered_last_dims_cpu',
        r'test_reshape_zero_and_negative_dim_cpu',
        r'test_reshape_zero_dim_cpu',
        # reduce
        r'test_reduce_l1_default_axes_keepdims_example_expanded_cpu',
        r'test_reduce_l1_default_axes_keepdims_random_expanded_cpu',
        r'test_reduce_l1_do_not_keepdims_example_expanded_cpu',
        r'test_reduce_l1_do_not_keepdims_random_expanded_cpu',
        r'test_reduce_l1_keep_dims_example_expanded_cpu',
        r'test_reduce_l1_keep_dims_random_expanded_cpu',
        r'test_reduce_l1_negative_axes_keep_dims_example_expanded_cpu',
        r'test_reduce_l1_negative_axes_keep_dims_random_expanded_cpu',
        r'test_reduce_l2_default_axes_keepdims_example_expanded_cpu',
        r'test_reduce_l2_default_axes_keepdims_random_expanded_cpu',
        r'test_reduce_l2_do_not_keepdims_example_expanded_cpu',
        r'test_reduce_l2_do_not_keepdims_random_expanded_cpu',
        r'test_reduce_l2_keep_dims_example_expanded_cpu',
        r'test_reduce_l2_keep_dims_random_expanded_cpu',
        r'test_reduce_l2_negative_axes_keep_dims_example_expanded_cpu',
        r'test_reduce_l2_negative_axes_keep_dims_random_expanded_cpu',
        r'test_reduce_log_sum_asc_axes_cpu',
        r'test_reduce_log_sum_asc_axes_expanded_cpu',
        r'test_reduce_log_sum_default_cpu',
        r'test_reduce_log_sum_default_expanded_cpu',
        r'test_reduce_log_sum_desc_axes_cpu',
        r'test_reduce_log_sum_desc_axes_expanded_cpu',
        r'test_reduce_log_sum_exp_default_axes_keepdims_example_expanded_cpu',
        r'test_reduce_log_sum_exp_default_axes_keepdims_random_expanded_cpu',
        r'test_reduce_log_sum_exp_do_not_keepdims_example_expanded_cpu',
        r'test_reduce_log_sum_exp_do_not_keepdims_random_expanded_cpu',
        r'test_reduce_log_sum_exp_keepdims_example_expanded_cpu',
        r'test_reduce_log_sum_exp_keepdims_random_expanded_cpu',
        r'test_reduce_log_sum_exp_negative_axes_keepdims_example_expanded_cpu',
        r'test_reduce_log_sum_exp_negative_axes_keepdims_random_expanded_cpu',
        r'test_reduce_log_sum_negative_axes_cpu',
        r'test_reduce_log_sum_negative_axes_expanded_cpu',
        r'test_reduce_max_do_not_keepdims_example_cpu',
        r'test_reduce_max_do_not_keepdims_random_cpu',
        r'test_reduce_max_keepdims_example_cpu',
        r'test_reduce_max_keepdims_random_cpu',
        r'test_reduce_max_negative_axes_keepdims_example_cpu',
        r'test_reduce_max_negative_axes_keepdims_random_cpu',
        r'test_reduce_mean_default_axes_keepdims_example_cpu',
        r'test_reduce_mean_default_axes_keepdims_random_cpu',
        r'test_reduce_mean_do_not_keepdims_example_cpu',
        r'test_reduce_mean_do_not_keepdims_random_cpu',
        r'test_reduce_mean_keepdims_example_cpu',
        r'test_reduce_mean_keepdims_random_cpu',
        r'test_reduce_mean_negative_axes_keepdims_example_cpu',
        r'test_reduce_mean_negative_axes_keepdims_random_cpu',
        r'test_reduce_min_do_not_keepdims_example_cpu',
        r'test_reduce_min_do_not_keepdims_random_cpu',
        r'test_reduce_min_keepdims_example_cpu',
        r'test_reduce_min_keepdims_random_cpu',
        r'test_reduce_min_negative_axes_keepdims_example_cpu',
        r'test_reduce_min_negative_axes_keepdims_random_cpu',
        r'test_reduce_prod_do_not_keepdims_example_cpu',
        r'test_reduce_prod_do_not_keepdims_random_cpu',
        r'test_reduce_prod_keepdims_example_cpu',
        r'test_reduce_prod_keepdims_random_cpu',
        r'test_reduce_prod_negative_axes_keepdims_example_cpu',
        r'test_reduce_prod_negative_axes_keepdims_random_cpu',
        r'test_reduce_sum_default_axes_keepdims_example_cpu',
        r'test_reduce_sum_default_axes_keepdims_random_cpu',
        r'test_reduce_sum_do_not_keepdims_example_cpu',
        r'test_reduce_sum_do_not_keepdims_random_cpu',
        r'test_reduce_sum_empty_axes_input_noop_example_cpu',
        r'test_reduce_sum_empty_axes_input_noop_random_cpu',
        r'test_reduce_sum_keepdims_example_cpu',
        r'test_reduce_sum_keepdims_random_cpu',
        r'test_reduce_sum_negative_axes_keepdims_example_cpu',
        r'test_reduce_sum_negative_axes_keepdims_random_cpu',
        # unsqueeze (duplicated in the original source; kept verbatim)
        r'test_unsqueeze_axis_0_cpu',
        r'test_unsqueeze_axis_1_cpu',
        r'test_unsqueeze_axis_2_cpu',
        r'test_unsqueeze_negative_axes_cpu',
        r'test_unsqueeze_three_axes_cpu',
        r'test_unsqueeze_two_axes_cpu',
        r'test_unsqueeze_unsorted_axes_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_10_0(backend_test):
    """Exclude ONNX 1.10.0 backend tests unsupported by this backend.

    This is a second definition of ``disabled_tests_onnx_1_10_0`` in this
    file (manual-merge artifact); Python rebinds the name, so the earlier
    definition's exclusions (bernoulli/castlike/optional) would otherwise be
    silently dropped.  To keep the merge safe, both exclusion sets are
    applied here.

    Args:
        backend_test: onnx BackendTest instance; test name patterns are
            disabled through its ``exclude(regex)`` method.
    """
    patterns = (
        # exclusions carried over from the earlier (shadowed) definition
        # fails -- from OnnxBackendNodeModelTest
        r'test_bernoulli_double_expanded_cpu',
        r'test_bernoulli_expanded_cpu',
        r'test_bernoulli_seed_expanded_cpu',
        # errors -- from OnnxBackendNodeModelTest
        r'test_bernoulli_cpu',
        r'test_bernoulli_double_cpu',
        r'test_bernoulli_seed_cpu',
        r'test_castlike_BFLOAT16_to_FLOAT_cpu',
        r'test_castlike_BFLOAT16_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT_to_BFLOAT16_cpu',
        r'test_castlike_FLOAT_to_BFLOAT16_expanded_cpu',
        r'test_castlike_FLOAT_to_STRING_cpu',
        r'test_castlike_FLOAT_to_STRING_expanded_cpu',
        r'test_castlike_STRING_to_FLOAT_cpu',
        r'test_castlike_STRING_to_FLOAT_expanded_cpu',
        r'test_optional_get_element_cpu',
        r'test_optional_get_element_sequence_cpu',
        r'test_optional_has_element_cpu',
        r'test_optional_has_element_empty_cpu',
        # unsupported shape attributes
        r'test_shape_end_1_cpu',
        r'test_shape_end_negative_1_cpu',
        r'test_shape_start_1_cpu',
        r'test_shape_start_1_end_2_cpu',
        r'test_shape_start_1_end_negative_1_cpu',
        r'test_shape_start_negative_1_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_12_0(backend_test):
    """Exclude ONNX 1.12.0 backend tests unsupported by this backend.

    This is a second definition of ``disabled_tests_onnx_1_12_0`` in this
    file (manual-merge artifact); Python rebinds the name, so the earlier
    definition's exclusions (window functions, dft, layer_normalization,
    sequence_map, stft, ...) would otherwise be silently dropped.  To keep
    the merge safe, both exclusion sets are applied here.

    Args:
        backend_test: onnx BackendTest instance; test name patterns are
            disabled through its ``exclude(regex)`` method.
    """
    patterns = (
        # exclusions carried over from the earlier (shadowed) definition
        # errors -- from OnnxBackendNodeModelTest
        r'test_blackmanwindow_cpu',
        r'test_blackmanwindow_expanded_cpu',
        r'test_blackmanwindow_symmetric_cpu',
        r'test_blackmanwindow_symmetric_expanded_cpu',
        r'test_dft_axis_cpu',
        r'test_dft_cpu',
        r'test_dft_inverse_cpu',
        r'test_hammingwindow_cpu',
        r'test_hammingwindow_expanded_cpu',
        r'test_hammingwindow_symmetric_cpu',
        r'test_hammingwindow_symmetric_expanded_cpu',
        r'test_hannwindow_cpu',
        r'test_hannwindow_expanded_cpu',
        r'test_hannwindow_symmetric_cpu',
        r'test_hannwindow_symmetric_expanded_cpu',
        r'test_layer_normalization_2d_axis0_cpu',
        r'test_layer_normalization_2d_axis1_cpu',
        r'test_layer_normalization_2d_axis_negative_1_cpu',
        r'test_layer_normalization_2d_axis_negative_2_cpu',
        r'test_layer_normalization_3d_axis0_epsilon_cpu',
        r'test_layer_normalization_3d_axis1_epsilon_cpu',
        r'test_layer_normalization_3d_axis2_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_1_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_2_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_3_epsilon_cpu',
        r'test_layer_normalization_4d_axis0_cpu',
        r'test_layer_normalization_4d_axis1_cpu',
        r'test_layer_normalization_4d_axis2_cpu',
        r'test_layer_normalization_4d_axis3_cpu',
        r'test_layer_normalization_4d_axis_negative_1_cpu',
        r'test_layer_normalization_4d_axis_negative_2_cpu',
        r'test_layer_normalization_4d_axis_negative_3_cpu',
        r'test_layer_normalization_4d_axis_negative_4_cpu',
        r'test_layer_normalization_default_axis_cpu',
        r'test_melweightmatrix_cpu',
        r'test_sequence_map_add_1_sequence_1_tensor_cpu',
        r'test_sequence_map_add_1_sequence_1_tensor_expanded_cpu',
        r'test_sequence_map_add_2_sequences_cpu',
        r'test_sequence_map_add_2_sequences_expanded_cpu',
        r'test_sequence_map_extract_shapes_cpu',
        r'test_sequence_map_extract_shapes_expanded_cpu',
        r'test_sequence_map_identity_1_sequence_1_tensor_cpu',
        r'test_sequence_map_identity_1_sequence_1_tensor_expanded_cpu',
        r'test_sequence_map_identity_1_sequence_cpu',
        r'test_sequence_map_identity_1_sequence_expanded_cpu',
        r'test_sequence_map_identity_2_sequences_cpu',
        r'test_sequence_map_identity_2_sequences_expanded_cpu',
        r'test_stft_cpu',
        r'test_stft_with_window_cpu',
        # exclusions from this (later) definition
        r'test_scatter_elements_with_duplicate_indices_cpu',
        r'test_reduce_sum_square_default_axes_keepdims_example_expanded_cpu',
        r'test_reduce_sum_square_default_axes_keepdims_random_expanded_cpu',
        r'test_reduce_sum_square_do_not_keepdims_example_expanded_cpu',
        r'test_reduce_sum_square_do_not_keepdims_random_expanded_cpu',
        r'test_reduce_sum_square_keepdims_example_expanded_cpu',
        r'test_reduce_sum_square_keepdims_random_expanded_cpu',
        r'test_reduce_sum_square_negative_axes_keepdims_example_expanded_cpu',
        r'test_reduce_sum_square_negative_axes_keepdims_random_expanded_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def create_backend_test(testname=None, target_device=None):
......@@ -116,8 +1101,7 @@ def create_backend_test(testname=None, target_device=None):
if testname:
backend_test.include(testname + '.*')
else:
# Include all of the nodes that we support.
# Onnx native node tests
# Onnx Operator tests
backend_test.include(r'.*test_abs.*')
backend_test.include(r'.*test_acos.*')
backend_test.include(r'.*test_acosh.*')
......@@ -131,73 +1115,209 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_atanh.*')
backend_test.include(r'.*test_averagepool.*')
backend_test.include(r'.*test_AvgPool.*')
backend_test.include(r'.*test_BatchNorm.*eval.*')
backend_test.include(r'.*test_[bB]atch[nN]orm(?!.*training).*')
backend_test.include(r'.*test_bitshift.*')
backend_test.include(r'.*test_bitwise.*')
backend_test.include(r'.*test_ceil.*')
backend_test.include(r'.*test_celu.*')
backend_test.include(r'.*test_clip.*')
backend_test.include(r'.*test_cast_.*')
backend_test.include(r'.*test_col2im.*')
backend_test.include(r'.*test_compress.*')
backend_test.include(r'.*test_concat.*')
backend_test.include(r'.*test_constant.*')
backend_test.include(r'.*test_Conv[1-3]d*')
backend_test.include(r'.*test_constant_.*')
backend_test.include(r'.*test_Constant.*')
backend_test.include(r'.*test_constantofshape.*')
backend_test.include(r'.*test_(basic_)?conv_.*')
backend_test.include(r'.*test_Conv[1-3]d.*')
backend_test.include(r'.*test_convinteger.*')
backend_test.include(r'.*test_convtranspose.*')
backend_test.include(r'.*test_ConvTranspose[1-3]d.*')
backend_test.include(r'.*test_cos.*')
backend_test.include(r'.*test_cosh.*')
backend_test.include(r'.*test_cumsum.*')
backend_test.include(r'.*test_(basic_)?deform_conv.*')
backend_test.include(r'.*test_depthtospace.*')
backend_test.include(r'.*test_dequantizelinear')
backend_test.include(r'.*test_dequantizelinear.*')
backend_test.include(r'.*test_det.*')
backend_test.include(r'.*test_dft.*')
backend_test.include(r'.*test_div.*')
backend_test.include(r'.*test_dropout.*')
backend_test.include(r'.*test_ELU*')
backend_test.include(r'.*test_elu.*')
backend_test.include(r'.*test_einsum.*')
backend_test.include(r'.*test_equal.*')
backend_test.include(r'.*test_Embedding*')
backend_test.include(r'.*test_exp.*')
backend_test.include(r'.*test_Embedding.*')
backend_test.include(r'.*test_erf.*')
backend_test.include(r'.*test_exp_.*')
backend_test.include(r'.*test_expand.*')
backend_test.include(r'.*test_eyelike.*')
backend_test.include(r'.*test_flatten.*')
backend_test.include(r'.*test_floor.*')
backend_test.include(r'.*test_fmod.*')
backend_test.include(r'.*test_gru.*')
backend_test.include(r'.*test_gather.*')
backend_test.include(r'.*test_gemm.*')
backend_test.include(r'.*test_globalaveragepool.*')
backend_test.include(r'.*test_globallppool.*')
backend_test.include(r'.*test_globalmaxpool.*')
backend_test.include(r'.*test_greater.*')
backend_test.include(r'.*test_hardsigmoid.*')
backend_test.include(r'.*test_hardswish.*')
backend_test.include(r'.*test_gridsample.*')
backend_test.include(r'.*test_hardmax.*')
backend_test.include(r'.*test_identity.*')
backend_test.include(r'.*test_if.*')
backend_test.include(r'.*test_instancenorm.*')
backend_test.include(r'.*test_isinf.*')
backend_test.include(r'.*test_isnan.*')
backend_test.include(r'.*test_LeakyReLU*')
backend_test.include(r'.*test_leakyrelu.*')
backend_test.include(r'.*test_less.*')
backend_test.include(r'.*test_Linear.*')
backend_test.include(r'.*test_log.*')
backend_test.include(r'.*test_logsoftmax.*')
backend_test.include(r'.*test_LogSoftmax.*')
backend_test.include(r'.*test_log_softmax.*')
backend_test.include(r'.*test_lrn.*')
backend_test.include(r'.*test_lstm.*')
backend_test.include(r'.*test_log.*')
backend_test.include(r'.*test_loop.*')
backend_test.include(r'.*test_lpnorm.*')
backend_test.include(r'.*test_lppool.*')
backend_test.include(r'.*test_matmul.*')
backend_test.include(r'.*test_max.*')
backend_test.include(r'.*test_MaxPool[1-9]d.*')
backend_test.include(r'.*test_max_.*')
backend_test.include(r'.*test_maxpool.*')
backend_test.include(r'.*test_MaxPool[1-3]d.*')
backend_test.include(r'.*test_maxroipool.*')
backend_test.include(r'.*test_maxunpool.*')
backend_test.include(r'.*test_mean.*')
backend_test.include(r'.*test_melweightmatrix.*')
backend_test.include(r'.*test_min.*')
backend_test.include(r' .*test_mod.*')
backend_test.include(r'.*test_mod.*')
backend_test.include(r'.*test_mul.*')
backend_test.include(r'.*test_multinomial.*')
backend_test.include(r'.*test_Multinomial.*')
backend_test.include(r'.*test_[mM]ultinomial.*')
backend_test.include(r'.*test_neg.*')
backend_test.include(r'.*test_nonmaxsuppression.*')
backend_test.include(r'.*test_nonzero.*')
backend_test.include(r'.*test_not.*')
backend_test.include(r'.*test_onehot.*')
backend_test.include(r'.*optional_get_element.*')
backend_test.include(r'.*optional_has_element.*')
backend_test.include(r'.*test_or.*')
backend_test.include(r'.*test_(constant_|edge_|reflect_|wrap_)?pad.*')
backend_test.include(
r'.*test_(Constant|Reflection|Replication|Zero)+Pad2d.*')
backend_test.include(r'.*test_pow.*')
backend_test.include(r'.*test_qlinearconv.*')
backend_test.include(r'.*test_qlinearmatmul.*')
backend_test.include(r'.*test_quantizelinear.*')
backend_test.include(r'.*test_(simple_)?rnn.*')
backend_test.include(r'.*test_randomnormal.*')
backend_test.include(r'.*test_randomuniform.*')
backend_test.include(r'.*test_reciprocal.*')
backend_test.include(r'.*test_reduce_max.*')
backend_test.include(r'.*test_reduce_mean.*')
backend_test.include(r'.*test_reduce_min.*')
backend_test.include(r'.*test_reduce_prod.*')
backend_test.include(r'.*test_reduce_sum.*')
backend_test.include(r'.*test_reshape.*')
backend_test.include(r'.*test_resize.*')
backend_test.include(r'.*test_reversesequence.*')
backend_test.include(r'.*test_roialign.*')
backend_test.include(r'.*test_round.*')
backend_test.include(r'.*test_stft.*')
backend_test.include(r'.*test_scan.*')
backend_test.include(r'.*test_scatter.*')
backend_test.include(r'.*test_sequence_at.*')
backend_test.include(r'.*test_sequence_construct.*')
backend_test.include(r'.*test_sequence_empty.*')
backend_test.include(r'.*test_sequence_erase.*')
backend_test.include(r'.*test_sequence_insert.*')
backend_test.include(r'.*test_sequence_length.*')
backend_test.include(r'.*test_shape.*')
backend_test.include(r'.*test_[sS]igmoid.*')
backend_test.include(r'.*test_sign.*')
backend_test.include(r'.*test_sin_.*')
backend_test.include(r'.*test_sinh.*')
backend_test.include(r'.*test_size.*')
backend_test.include(r'.*test_slice.*')
backend_test.include(r'.*test_spacetodepth.*')
backend_test.include(r'.*test_split.*')
backend_test.include(r'.*test_split_to_sequence.*')
backend_test.include(r'.*test_sqrt.*')
backend_test.include(r'.*test_squeeze.*')
backend_test.include(r'.*test_squeeze.*')
backend_test.include(r'.*test_strnorm.*')
backend_test.include(r'.*test_sub.*')
backend_test.include(r'.*test_sum.*')
backend_test.include(r'.*test_tan_.*')
backend_test.include(r'.*test_[tT]anh.*')
backend_test.include(r'.*test_tfidfvectorizer.*')
backend_test.include(r'.*test_tile.*')
backend_test.include(r'.*test_top_k.*')
backend_test.include(r'.*test_transpose.*')
backend_test.include(r'.*test_tril.*')
backend_test.include(r'.*test_triu.*')
backend_test.include(r'.*test_unique.*')
backend_test.include(r'.*test_unsqueeze.*')
backend_test.include(r'.*test_upsample.*')
backend_test.include(r'.*test_where.*')
backend_test.include(r'.*test_xor.*')
# Onnx Function tests
backend_test.include(r'.*test_bernoulli.*')
backend_test.include(r'.*test_blackmanwindow.*')
backend_test.include(r'.*test_castlike.*')
backend_test.include(r'.*test_celu.*')
backend_test.include(r'.*test_center_crop_pad.*')
backend_test.include(r'.*test_clip.*')
backend_test.include(r'.*test_dynamicquantizelinear.*')
backend_test.include(r'.*test_elu.*')
backend_test.include(r'.*test_ELU.*')
backend_test.include(r'.*test_GLU.*')
backend_test.include(r'.*test_greater_equal.*')
backend_test.include(r'.*test_group_normalization.*')
backend_test.include(r'.*test_hammingwindow.*')
backend_test.include(r'.*test_hannwindow.*')
backend_test.include(r'.*test_hardsigmoid.*')
backend_test.include(r'.*test_hardswish.*')
backend_test.include(r'.*test_layer_normalization.*')
backend_test.include(r'.*test_LeakyReLU.*')
backend_test.include(r'.*test_leakyrelu.*')
backend_test.include(r'.*test_less.*')
backend_test.include(r'.*test_Linear.*')
backend_test.include(r'.*test_logsoftmax.*')
backend_test.include(r'.*test_log_softmax.*')
backend_test.include(r'.*test_LogSoftmax.*')
backend_test.include(r'.*test_mvn.*')
backend_test.include(r'.*test_mish.*')
backend_test.include(r'.*test_nllloss.*')
backend_test.include(r'.*test_PixelShuffle.*')
backend_test.include(r'.*test_PoissonNLLLLoss_no_reduce.*')
backend_test.include(r'.*test_prelu.*')
backend_test.include(r'.*test_PReLU.*')
backend_test.include(r'.*test_range.*')
backend_test.include(r'.*test_reduce_l1.*')
backend_test.include(r'.*test_reduce_l2.*')
backend_test.include(r'.*test_reduce_log.*')
backend_test.include(r'.*test_ReLU.*')
backend_test.include(r'.*test_relu.*')
backend_test.include(r'.*test_selu.*')
backend_test.include(r'.*test_SELU.*')
backend_test.include(r'.*test_sequence_map.*')
backend_test.include(r'.*test_shrink.*')
backend_test.include(r'.*test_[sS]oftmax.*')
backend_test.include(r'.*test_[sS]oftplus.*')
backend_test.include(r'.*test_[sS]oftsign.*')
backend_test.include(r'.*test_sce.*')
backend_test.include(r'.*test_thresholdedrelu.*')
# OnnxBackendPyTorchOperatorModelTest
backend_test.include(r'.*test_operator_add_broadcast.*')
backend_test.include(r'.*test_operator_addconstant.*')
backend_test.include(r'.*test_operator_addmm.*')
backend_test.include(r'.*test_operator_add_size1.*')
backend_test.include(r'.*test_operator_basic.*')
backend_test.include(r'.*test_operator_chunk.*')
backend_test.include(r'.*test_operator_clip.*')
backend_test.include(r'.*test_operator_concat2.*')
backend_test.include(r'.*test_operator_conv_.*')
backend_test.include(r'.*test_operator_convtranspose.*')
backend_test.include(r'.*test_operator_exp.*')
backend_test.include(r'.*test_operator_flatten.*')
backend_test.include(r'.*test_operator_index.*')
backend_test.include(r'.*test_operator_max_.*')
backend_test.include(r'.*test_operator_maxpool.*')
backend_test.include(r'.*test_operator_min.*')
backend_test.include(r'.*test_operator_mod.*')
backend_test.include(r'.*test_operator_mm.*')
backend_test.include(r'.*test_operator_non_float_params.*')
backend_test.include(r'.*test_operator_pad.*')
backend_test.include(r'.*test_operator_params.*')
backend_test.include(r'.*test_operator_permute2.*')
backend_test.include(r'.*test_operator_pow.*')
......@@ -205,55 +1325,19 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_operator_reduced_mean_keepdim.*')
backend_test.include(r'.*test_operator_reduced_sum_.*')
backend_test.include(r'.*test_operator_reduced_sum_keepdim.*')
backend_test.include(r'.*test_operator_repeat.*')
backend_test.include(r'.*test_operator_selu.*')
backend_test.include(r'.*test_operator_sqrt.*')
backend_test.include(r'.*test_operator_symbolic_override.*')
backend_test.include(r'.*test_operator_symbolic_override_nested.*')
backend_test.include(r'.*test_operator_view.*')
backend_test.include(r'.*test_or.*')
backend_test.include(r'.*test_pow.*')
backend_test.include(r'.*test_PoissonNLLLLoss_no_reduce*')
backend_test.include(r'.*test_quantizelinear')
backend_test.include(r'.*test_reciprocal.*')
backend_test.include(r'.*test_reduce.*')
backend_test.include(r'.*test_ReLU*')
backend_test.include(r'.*test_relu.*')
#backend_test.include(r'.*test_reversesequence.*')
backend_test.include(r'.*test_RoiAlign*')
backend_test.include(r'.*test_roialign.*')
backend_test.include(r'.*test_scatter.*')
backend_test.include(r'.*test_Scatter.*')
backend_test.include(r'.*test_selu.*')
backend_test.include(r'.*test_shape.*')
backend_test.include(r'.*test_Sigmoid*')
backend_test.include(r'.*test_sigmoid.*')
backend_test.include(r'.*test_sin.*')
backend_test.include(r'.*test_sinh.*')
backend_test.include(r'.*test_size.*')
backend_test.include(r'.*test_Softmax*')
backend_test.include(r'.*test_softmax.*')
backend_test.include(r'.*test_Softmin*')
backend_test.include(r'.*test_Softplus*')
backend_test.include(r'.*test_softplus.*')
backend_test.include(r'.*test_softsign.*')
backend_test.include(r'.*test_sqrt.*')
backend_test.include(r'.*test_squeeze_cuda')
backend_test.include(r'.*test_sub.*')
backend_test.include(r'.*test_sum.*')
backend_test.include(r'.*test_tan.*')
backend_test.include(r'.*test_Tanh*')
backend_test.include(r'.*test_tanh.*')
backend_test.include(r'.*test_thresholdedrelu.*')
backend_test.include(r'.*test_topk.*')
backend_test.include(r'.*test_Topk.*')
backend_test.include(r'.*test_transpose.*')
backend_test.include(r'.*test_unsqueeze.*')
backend_test.include(r'.*test_where*')
backend_test.include(r'.*test_where.*')
backend_test.include(r'.*test_xor.*')
backend_test.include(r'.*test_ZeroPad2d*')
# # Onnx native model tests
# OnnxBackendSimpleModelTest
backend_test.include(r'.*test_gradient_of.*')
backend_test.include(r'.*test_sequence_model.*')
backend_test.include(r'.*test_single_relu_model.*')
# OnnxBackendRealModelTest
backend_test.include(r'.*test_bvlc_alexnet.*')
backend_test.include(r'.*test_densenet121.*')
backend_test.include(r'.*test_inception_v1.*')
......@@ -264,76 +1348,58 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_vgg19.*')
backend_test.include(r'.*test_zfnet512.*')
# exclude unenabled ops get pulled in with wildcards
# test_constant_pad gets pulled in with the test_constant* wildcard. Explicitly disable padding tests for now.
# Operator MATMULINTEGER is not supported by TRT
backend_test.exclude(r'.*test_matmulinteger.*')
backend_test.exclude(r'.*test_maxunpool.*')
# Absolute diff failed because
# numpy compares the difference between actual and desired to atol + rtol * abs(desired)
# failed test cases
backend_test.exclude(
r'test_argmax_keepdims_example_select_last_index_cpu')
backend_test.exclude(
r'test_argmax_negative_axis_keepdims_example_select_last_index_cpu'
)
backend_test.exclude(
r'test_argmax_no_keepdims_example_select_last_index_cpu')
backend_test.exclude(
r'test_argmin_keepdims_example_select_last_index_cpu')
backend_test.exclude(
r'test_argmin_negative_axis_keepdims_example_select_last_index_cpu'
)
backend_test.exclude(
r'test_argmin_no_keepdims_example_select_last_index_cpu')
backend_test.exclude(r'test_lrn_cpu')
backend_test.exclude(r'test_lrn_default_cpu')
backend_test.exclude(r'test_maxpool_2d_dilations_cpu')
backend_test.exclude(r'test_MaxPool2d_stride_padding_dilation_cpu')
backend_test.exclude(r'test_MaxPool1d_stride_padding_dilation_cpu')
backend_test.exclude(
r'test_maxpool_with_argmax_2d_precomputed_pads_cpu')
backend_test.exclude(
r'test_maxpool_with_argmax_2d_precomputed_strides_cpu')
# error cases
backend_test.exclude(r'test_constant_pad_cpu')
backend_test.exclude(r'test_constantofshape_float_ones_cpu')
backend_test.exclude(r'test_constantofshape_int_shape_zero_cpu')
backend_test.exclude(r'test_constantofshape_int_zeros_cpu')
backend_test.exclude(r'test_expand_dim_changed_cpu')
backend_test.exclude(r'test_expand_dim_unchanged_cpu')
backend_test.exclude(r'test_expand_shape_model1_cpu')
backend_test.exclude(r'test_expand_shape_model2_cpu')
backend_test.exclude(r'test_expand_shape_model3_cpu')
backend_test.exclude(r'test_expand_shape_model4_cpu')
backend_test.exclude(r'test_identity_sequence_cpu')
backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
backend_test.exclude(r'test_negative_log_likelihood_loss_*')
# all reduce ops have dynamic axes inputs
backend_test.exclude(r'test_softmax_cross_entropy_*')
backend_test.exclude(r'test_Embedding_cpu')
# real model tests
# Skipped tests
# backend_test.include(r'.*test_adagrad.*')
# backend_test.include(r'.*test_adam.*')
# backend_test.include(r'.*test_ai_onnx_ml.*')
# backend_test.include(r'.*test_batchnorm_epsilon_training.*')
# backend_test.include(r'.*test_batchnorm_example_training.*')
# backend_test.include(r'.*test_momentum.*')
# backend_test.include(r'.*test_nesterov_momentum.*')
# backend_test.include(r'.*test_training_dropout.*')
# backend_test.include(r'.*test_Softmin.*')
# Exclude failing tests
# from OnnxBackendRealModelTest
backend_test.exclude(r'test_inception_v1_cpu')
backend_test.exclude(r'test_resnet50_cpu')
backend_test.exclude(r'test_squeezenet_cpu')
# PRelu OnnxBackendPyTorchConvertedModelTest has wrong dim for broadcasting
backend_test.exclude(r'[a-z,_]*PReLU_[0-9]d_multiparam[a-z,_]*')
# Remove when float8 is supported
disabled_tests_float8(backend_test)
# Remove when dynamic shapes are supported
disabled_tests_dynamic_shape(backend_test)
# additional cases disabled for a specific onnx version
if version.parse(onnx.__version__) <= version.parse("1.7.0"):
if version.parse(onnx.__version__) >= version.parse("1.7.0"):
disabled_tests_onnx_1_7_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.8.0"):
disabled_tests_onnx_1_8_1(backend_test)
disabled_tests_onnx_1_8_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.9.0"):
disabled_tests_onnx_1_9_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.10.0"):
disabled_tests_onnx_1_10_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.11.0"):
disabled_tests_onnx_1_11_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.12.0"):
disabled_tests_onnx_1_12_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.13.0"):
disabled_tests_onnx_1_13_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.14.0"):
disabled_tests_onnx_1_14_0(backend_test)
# import all test cases at global scope to make
# them visible to python.unittest.
......
#!/bin/bash
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
......@@ -24,28 +22,8 @@
# THE SOFTWARE.
#####################################################################################
set -e
if [ -z "$ONNX_HOME" ]
then
# The onnx library uses ONNX_HOME, by default if it doesn't exist
# the path of " ~/.onnx " is used
ONNX_HOME=$HOME/.onnx
fi
model_dir=$ONNX_HOME/models
tmp_dir=$ONNX_HOME/tmp/
mkdir -p $model_dir
mkdir -p $tmp_dir
models="bvlc_alexnet \
densenet121 \
inception_v2 \
shufflenet \
vgg19 \
zfnet512"
for name in $models
do
curl https://download.onnxruntime.ai/onnx/models/$name.tar.gz --output $tmp_dir/$name.tar.gz
tar -xzvf $tmp_dir/$name.tar.gz --directory $model_dir && rm $tmp_dir/$name.tar.gz
done
onnx==1.14.1
protobuf==3.20.2
numpy==1.21.6
packaging==23.0
pytest==6.0.1
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
......@@ -21,12 +21,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import sys
import migraphx
try:
import numpy as np
except:
sys.exit()
def test_conv_relu():
......@@ -55,8 +50,12 @@ def test_sub_uint64():
params = {}
shapes = p.get_parameter_shapes()
params["0"] = np.arange(120).reshape(shapes["0"].lens()).astype(np.uint64)
params["1"] = np.arange(20).reshape(shapes["1"].lens()).astype(np.uint64)
params["0"] = migraphx.create_argument(
migraphx.shape(type='uint64_type', lens=shapes["0"].lens()),
list(range(120)))
params["1"] = migraphx.create_argument(
migraphx.shape(type='uint64_type', lens=shapes["1"].lens()),
list(range(20)))
r = p.run(params)
print(r)
......@@ -71,7 +70,9 @@ def test_neg_int64():
params = {}
shapes = p.get_parameter_shapes()
params["0"] = np.arange(6).reshape(shapes["0"].lens()).astype(np.int64)
params["0"] = migraphx.create_argument(
migraphx.shape(type='int64_type', lens=shapes["0"].lens()),
list(range(6)))
r = p.run(params)
print(r)
......@@ -86,8 +87,9 @@ def test_nonzero():
params = {}
shapes = p.get_parameter_shapes()
params["data"] = np.array([1, 1, 0,
1]).reshape(shapes["data"].lens()).astype(bool)
params["data"] = migraphx.create_argument(
migraphx.shape(type='bool_type', lens=shapes["data"].lens()),
[1, 1, 0, 1])
r = p.run(params)
print(r)
......@@ -105,8 +107,8 @@ def test_fp16_imagescaler():
params = {}
shapes = p.get_parameter_shapes()
params["0"] = np.random.randn(768).reshape(shapes["0"].lens()).astype(
np.float16)
params["0"] = migraphx.generate_argument(
migraphx.shape(type='half_type', lens=shapes["0"].lens()), 768)
r = p.run(params)[-1]
print(r)
......@@ -124,10 +126,12 @@ def test_if_pl():
params = {}
shapes = p.get_parameter_shapes()
params["x"] = np.ones(6).reshape(shapes["x"].lens()).astype(np.float32)
params["y"] = np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0
]).reshape(shapes["y"].lens()).astype(np.float32)
params["cond"] = np.array([1]).reshape(()).astype(bool)
params["x"] = migraphx.fill_argument(
migraphx.shape(type='float_type', lens=shapes["x"].lens()), 1)
params["y"] = migraphx.fill_argument(
migraphx.shape(type='float_type', lens=shapes["y"].lens()), 2.0)
params["cond"] = migraphx.fill_argument(
migraphx.shape(type="bool", lens=[1], strides=[0]), 1)
r = p.run(params)[-1]
print(r)
......
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
......@@ -23,10 +23,55 @@
#####################################################################################
import migraphx
import ctypes
import os
import glob
def test_conv_relu():
hip = ctypes.cdll.LoadLibrary("libamdhip64.so")
# Full path of the library is needed to fix an issue on sles
# where the library is not loaded otherwise.
# We check for the presence of library at the following paths,
# in the order listed below:
#
# 1. 'rocm_path' environment variable
# 2. /opt/rocm
# 3. /opt/rocm-*
#
# If the library is not found at any of these paths, we fall back
# to the library path being detected automatically.
library = "libamdhip64.so"
# Environment variable containing path to rocm
rocm_path_env_var = "rocm_path"
# Check for rocm_path, default to /opt/rocm if it does not exist.
rocm_path_var = os.getenv(rocm_path_env_var, default="/opt/rocm")
# Join the paths to the library to get full path,
# e.g. /opt/rocm/lib/libamdhip64.so
library_file = os.path.join(rocm_path_var, "lib", library)
# Check if the library file exists at the specified path
if os.path.exists(library_file):
# Replace library name by full path to the library
library = library_file
else:
# Pattern match to look for path to different
# rocm versions: /opt/rocm-*
rocm_path_pattern = "/opt/rocm-*/lib/libamdhip64.so"
matching_libraries = glob.glob(rocm_path_pattern)
if matching_libraries:
# Replace library name by full path to the first
# library found.
library = matching_libraries[0]
# Loads library either by using the full path to the
# library, if it has been detected earlier,
# or, proceeds to load the library based on the name
# of the library.
hip = ctypes.cdll.LoadLibrary(library)
p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
print(p)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment