Commit 8d32c6b8 authored by Paul's avatar Paul
Browse files

Merge branch 'develop' into blas_tuning

parents 23cb7917 f25606f9
reshape_variable_input_test:p

0
12"Reshapereshape_variable_input_testZ
0



Z
1

b
2


B
\ No newline at end of file
 slice_var_input_static1:
)
data
starts
ends
axesoutput"Sliceslice_var_input_static1Z
data


Z
starts

Z
ends

Z
axes

b
output


B
\ No newline at end of file
 slice_var_input_steps_error:
0arg_step"Constant*
value**Bstep
3
data
starts
ends
axes
arg_stepoutput"Sliceslice_var_input_steps_errorZ
data


Z
starts

Z
ends

Z
axes

b
output


B
\ No newline at end of file
......@@ -24,7 +24,6 @@
#include <iostream>
#include <vector>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/pass_manager.hpp>
......@@ -48,7 +47,7 @@ TEST_CASE(averagepool_notset_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {12};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(averagepool_nt_cip_test)
......@@ -66,7 +65,7 @@ TEST_CASE(averagepool_nt_cip_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {8.33333};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(batch_norm_flat_test)
......@@ -77,15 +76,15 @@ TEST_CASE(batch_norm_flat_test)
migraphx::shape x_shape{migraphx::shape::float_type, {10}};
migraphx::shape c_shape(migraphx::shape::float_type, {1});
std::vector<float> x_data = {1.6524342,
-0.51048076,
0.32543048,
2.4410043,
2.0833702,
0.44981122,
1.0044622,
-0.24006313,
-0.43065986,
0.07626268};
-0.51048076,
0.32543048,
2.4410043,
2.0833702,
0.44981122,
1.0044622,
-0.24006313,
-0.43065986,
0.07626268};
std::vector<float> scale_data = {-0.02927135};
std::vector<float> bias_data = {0.42347777};
std::vector<float> mean_data = {-0.00449735};
......@@ -112,7 +111,7 @@ TEST_CASE(batch_norm_flat_test)
0.43305403,
0.4408022,
0.42019472};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(batch_norm_rank_2_test)
......@@ -149,7 +148,7 @@ TEST_CASE(batch_norm_rank_2_test)
9.89948504,
9.89948504,
12.72790933};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(batch_norm_1d_test)
......@@ -185,7 +184,7 @@ TEST_CASE(batch_norm_1d_test)
0.4927, 0.771, -1.956, -2.123, -0.664, -0.583, -0.7207, -0.5127};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(batch_norm_2d_test)
......@@ -251,7 +250,7 @@ TEST_CASE(batch_norm_2d_test)
-2.76707697e+00, 1.47579327e+01, 4.94736385e+00, 2.68847847e+01, -6.49254417e+00,
1.94286156e+00, -7.19223642e+00, -3.70413971e+00, -4.04303551e-01, -1.01827660e+01,
1.49476433e+00};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(batch_norm_3d_test)
......@@ -293,7 +292,7 @@ TEST_CASE(batch_norm_3d_test)
6.098, 11.03, 2.81, 2.81, 2.81, 12.125, 3.143, 8.53, 17.52, 4.938, 15.71,
1.347, 4.938, 1.167, 6.098, 12.67, 12.67, 4.453, 4.453, -0.4768, 12.67};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(celu_verify_test)
......@@ -310,12 +309,12 @@ TEST_CASE(celu_verify_test)
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> correct(6);
std::vector<float> gold(6);
float alpha = 0.5;
std::transform(data.begin(), data.end(), correct.begin(), [&](auto x) {
std::transform(data.begin(), data.end(), gold.begin(), [&](auto x) {
return std::max(0.0f, x) + std::min(0.0f, alpha * std::expm1(x / alpha));
});
EXPECT(migraphx::verify::verify_range(result_vector, correct));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(clip_args_type_mismatch)
......@@ -331,7 +330,7 @@ TEST_CASE(clip_args_type_mismatch)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.5, 2, 2, 1.9, 2.5, 3, 2.9, 3.2, 3.7};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(depthtospace_simple_test)
......@@ -349,7 +348,7 @@ TEST_CASE(depthtospace_simple_test)
std::vector<float> gold = {0, 12, 1, 13, 2, 14, 24, 36, 25, 37, 26, 38, 3, 15, 4, 16,
5, 17, 27, 39, 28, 40, 29, 41, 6, 18, 7, 19, 8, 20, 30, 42,
31, 43, 32, 44, 9, 21, 10, 22, 11, 23, 33, 45, 34, 46, 35, 47};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(spacetodepth_simple_test)
......@@ -367,7 +366,7 @@ TEST_CASE(spacetodepth_simple_test)
std::vector<float> gold = {0, 2, 4, 12, 14, 16, 24, 26, 28, 36, 38, 40, 1, 3, 5, 13,
15, 17, 25, 27, 29, 37, 39, 41, 6, 8, 10, 18, 20, 22, 30, 32,
34, 42, 44, 46, 7, 9, 11, 19, 21, 23, 31, 33, 35, 43, 45, 47};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(spacetodepth_depthtospace_test)
......@@ -375,11 +374,11 @@ TEST_CASE(spacetodepth_depthtospace_test)
// space to depth
auto p1 = migraphx::parse_onnx("spacetodepth_simple_test.onnx");
p1.compile(migraphx::make_target("ref"));
std::vector<float> data_in(48);
std::iota(std::begin(data_in), std::end(data_in), 0);
std::vector<float> gold_data_in(48);
std::iota(std::begin(gold_data_in), std::end(gold_data_in), 0);
migraphx::shape s_x_1{migraphx::shape::float_type, {1, 2, 4, 6}};
migraphx::parameter_map pp1;
pp1["x"] = migraphx::argument(s_x_1, data_in.data());
pp1["x"] = migraphx::argument(s_x_1, gold_data_in.data());
auto result1 = p1.eval(pp1).back();
// depth to space
auto p2 = migraphx::parse_onnx("depthtospace_simple_test.onnx");
......@@ -389,7 +388,7 @@ TEST_CASE(spacetodepth_depthtospace_test)
auto result2 = p2.eval(pp2).back();
std::vector<float> result_vector2;
result2.visit([&](auto output) { result_vector2.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify::verify_range(result_vector2, data_in));
EXPECT(migraphx::verify::verify_rms_range(result_vector2, gold_data_in));
}
TEST_CASE(eyelike_verify_test)
......@@ -406,8 +405,8 @@ TEST_CASE(eyelike_verify_test)
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> eyelike_mat = {0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.};
EXPECT(migraphx::verify::verify_range(result_vector, eyelike_mat));
std::vector<float> gold_eyelike_mat = {0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold_eyelike_mat));
}
TEST_CASE(eyelike_verify_negk_test)
......@@ -424,8 +423,8 @@ TEST_CASE(eyelike_verify_negk_test)
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> eyelike_mat = {0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.};
EXPECT(migraphx::verify::verify_range(result_vector, eyelike_mat));
std::vector<float> gold_eyelike_mat = {0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold_eyelike_mat));
}
TEST_CASE(gather_elements)
......@@ -448,7 +447,7 @@ TEST_CASE(gather_elements)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-0.125, 0.5625, -0.9375, 0.25, 0.5625, 0.9375};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(gemm_test)
......@@ -492,7 +491,7 @@ TEST_CASE(gemm_test)
0.8098607, 1.2157929, 1.1010075, 1.0706307, 1.0429881, 1.1771785, 1.2362702,
0.8239243, 1.1112559, 0.9639262, 1.0813537, 0.8825792, 1.121141, 1.1885703,
1.2227502, 1.4568202, 1.1388762, 1.55058, 1.0958102, 1.4637487, 1.5756242};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(gemm_half_test)
......@@ -536,7 +535,7 @@ TEST_CASE(gemm_half_test)
2.143, 2.062, 1.921, 1.836, 2.203, 1.952, 1.055, 1.225, 1.418, 1.209, 1.155,
1.42, 1.234, 1.302, 1.593, 1.368, 1.289, 1.327, 1.451, 1.394};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(greaterorequal_test)
......@@ -557,7 +556,7 @@ TEST_CASE(greaterorequal_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0, 1.0, 0.0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(hardsigmoid_verify_test)
......@@ -581,7 +580,7 @@ TEST_CASE(hardsigmoid_verify_test)
std::transform(data.begin(), data.end(), gold.begin(), [&](auto x) {
return std::max(0.0f, std::min(x * alpha + beta, 1.0f));
});
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_else_test)
......@@ -603,7 +602,7 @@ TEST_CASE(if_else_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0866565, -0.371067, 0.017719, 0.0250614, 0.0612539, -0.744683};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_else_test_inlined)
......@@ -622,7 +621,7 @@ TEST_CASE(if_else_test_inlined)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0507132, -0.712328, 0.0105797, 0.04569, 0.0185013, -1.16472};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_then_test)
......@@ -645,7 +644,7 @@ TEST_CASE(if_then_test)
// onnx adds ones so result should be just + 1.0
std::vector<float> gold = {1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_then_test_inlined)
......@@ -664,7 +663,7 @@ TEST_CASE(if_then_test_inlined)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_literal_test)
......@@ -689,14 +688,14 @@ TEST_CASE(if_literal_test)
{
auto result_vector = run_prog(true);
std::vector<float> gold = {1, 2, 3, 4, 5};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// else branch
{
auto result_vector = run_prog(false);
std::vector<float> gold = {5, 4, 3, 2, 1};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
}
......@@ -727,7 +726,7 @@ TEST_CASE(if_then_else_multi_output_shapes_inlined_test)
std::vector<float> gold = {
1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375, 0.125, 1.50, -0.125, 0.250, -0.250, -1.125};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_then_else_multi_output_shapes_test)
......@@ -758,7 +757,7 @@ TEST_CASE(if_then_else_multi_output_shapes_test)
std::vector<float> gold = {
1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375, 0.125, 1.50, -0.125, 0.250, -0.250, -1.125};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(if_pl_test)
......@@ -790,14 +789,14 @@ TEST_CASE(if_pl_test)
{
auto result_vector = run_prog(true);
std::vector<float> gold = {2, 3, 4, 5, 6, 7};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
// else branch
{
auto result_vector = run_prog(false);
std::vector<float> gold = {1, 2, 3, 4, 5, 6};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
}
......@@ -836,8 +835,8 @@ TEST_CASE(if_tuple_test)
auto results = run_prog(true);
std::vector<float> gold0(4, 2.0f);
std::vector<float> gold1(12, 4.0f);
EXPECT(migraphx::verify::verify_range(results.at(0), gold0));
EXPECT(migraphx::verify::verify_range(results.at(1), gold1));
EXPECT(migraphx::verify::verify_rms_range(results.at(0), gold0));
EXPECT(migraphx::verify::verify_rms_range(results.at(1), gold1));
}
// else branch
......@@ -845,8 +844,8 @@ TEST_CASE(if_tuple_test)
auto results = run_prog(false);
std::vector<float> gold0(4, 3.0f);
std::vector<float> gold1(12, 5.0f);
EXPECT(migraphx::verify::verify_range(results.at(0), gold0));
EXPECT(migraphx::verify::verify_range(results.at(1), gold1));
EXPECT(migraphx::verify::verify_rms_range(results.at(0), gold0));
EXPECT(migraphx::verify::verify_rms_range(results.at(1), gold1));
}
}
......@@ -877,7 +876,7 @@ TEST_CASE(instance_norm_test)
2.54919,
3.32379,
4.09838};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(instance_norm_dyn_batch_test)
......@@ -919,7 +918,7 @@ TEST_CASE(instance_norm_dyn_batch_test)
2.54919,
3.32379,
4.09838};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(instance_norm_3d_test)
......@@ -948,7 +947,7 @@ TEST_CASE(instance_norm_3d_test)
3.18218,
4.05505};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(lessorequal_test)
......@@ -969,7 +968,7 @@ TEST_CASE(lessorequal_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1, 0, 1};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(lpnormalization_1norm)
......@@ -997,7 +996,7 @@ TEST_CASE(lpnormalization_1norm)
3.f / 7.f,
0.f,
0.f};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(lpnormalization_2norm)
......@@ -1013,19 +1012,19 @@ TEST_CASE(lpnormalization_2norm)
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> correct{0.f,
2.f / 3.f,
-2.f / 3.f,
1.f / 3.f,
1.f / 6.f,
-5.f / 6.f,
3.f / 6.f,
-1.f / 6.f,
-4.f / 5.f,
3.f / 5.f,
0.f,
0.f};
EXPECT(migraphx::verify::verify_range(result_vector, correct));
std::vector<float> gold{0.f,
2.f / 3.f,
-2.f / 3.f,
1.f / 3.f,
1.f / 6.f,
-5.f / 6.f,
3.f / 6.f,
-1.f / 6.f,
-4.f / 5.f,
3.f / 5.f,
0.f,
0.f};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mean_broadcast_test)
......@@ -1056,7 +1055,7 @@ TEST_CASE(mean_broadcast_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold(24, 3);
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mean_test)
......@@ -1083,7 +1082,7 @@ TEST_CASE(mean_test)
const auto mean = std::accumulate(scalars.begin(), scalars.end(), 0.0) / num_data;
std::vector<double> gold(num_elms, mean);
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mean_integral_test)
......@@ -1110,7 +1109,7 @@ TEST_CASE(mean_integral_test)
const auto mean = std::accumulate(scalars.begin(), scalars.end(), 0) / num_data;
std::vector<int> gold(num_elms, mean);
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mod_test)
......@@ -1137,7 +1136,7 @@ TEST_CASE(mod_test)
std::vector<int32_t> gold = {0, -2, 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2,
5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, 5};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mod_test_different_types)
......@@ -1165,7 +1164,7 @@ TEST_CASE(mod_test_different_types)
std::vector<int32_t> gold = {0, -2, 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2,
5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, 5};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mod_test_fmod)
......@@ -1194,7 +1193,7 @@ TEST_CASE(mod_test_fmod)
10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 1.6, 3.9, 5.2,
7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.2, 1.3, 3.1};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(mod_test_fmod_different_types)
......@@ -1224,7 +1223,7 @@ TEST_CASE(mod_test_fmod_different_types)
10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 1.6, 3.9, 5.2,
7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.2, 1.3, 3.1};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(nonzero_test)
......@@ -1243,7 +1242,289 @@ TEST_CASE(nonzero_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 0, 1, 0, 0, 1, 0, 0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearadd_test)
{
// github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.QLinearAdd
migraphx::program p = migraphx::parse_onnx("qlinearadd_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape a{migraphx::shape::uint8_type, {64}};
std::vector<uint8_t> data_a = {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50,
52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76,
78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102,
104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126};
migraphx::shape b{migraphx::shape::uint8_type, {64}};
std::vector<uint8_t> data_b = {128, 126, 124, 122, 120, 118, 116, 114, 112, 110, 108, 106, 104,
102, 100, 98, 96, 94, 92, 90, 88, 86, 84, 82, 80, 78,
76, 74, 72, 70, 68, 66, 64, 62, 60, 58, 56, 54, 52,
50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26,
24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2};
migraphx::parameter_map pp;
pp["A"] = migraphx::argument(a, data_a.data());
pp["B"] = migraphx::argument(b, data_b.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearadd_bcast_test)
{
// github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.QLinearAdd
migraphx::program p = migraphx::parse_onnx("qlinearadd_bcast_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape a{migraphx::shape::int8_type, {64}};
std::vector<int8_t> data_a = {-64, -62, -60, -58, -56, -54, -52, -50, -48, -46, -44, -42, -40,
-38, -36, -34, -32, -30, -28, -26, -24, -22, -20, -18, -16, -14,
-12, -10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10, 12,
14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38,
40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62};
migraphx::shape b{migraphx::shape::int8_type, {1, 1, 64}};
std::vector<int8_t> data_b = {96, 94, 92, 90, 88, 86, 84, 82, 80, 78, 76, 74, 72,
70, 68, 66, 64, 62, 60, 58, 56, 54, 52, 50, 48, 46,
44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20,
18, 16, 14, 12, 10, 8, 6, 4, 2, 0, -2, -4, -6,
-8, -10, -12, -14, -16, -18, -20, -22, -24, -26, -28, -30};
migraphx::parameter_map pp;
pp["A"] = migraphx::argument(a, data_a.data());
pp["B"] = migraphx::argument(b, data_b.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<int8_t> gold = {-64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64,
-64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64,
-64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64,
-64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64,
-64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_test)
{
// https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
migraphx::program p = migraphx::parse_onnx("qlinearconv_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 7, 7}};
std::vector<uint8_t> x_data = {255, 174, 162, 25, 203, 168, 58, 15, 59, 237, 95, 129, 0,
64, 56, 242, 153, 221, 168, 12, 166, 232, 178, 186, 195, 237,
162, 237, 188, 39, 124, 77, 80, 102, 43, 127, 230, 21, 83,
41, 40, 134, 255, 154, 92, 141, 42, 148, 247};
migraphx::parameter_map pp;
pp["X"] = migraphx::argument(sx, x_data.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {0, 81, 93, 230, 52, 87, 197, 240, 196, 18, 160, 126, 255,
191, 199, 13, 102, 34, 87, 243, 89, 23, 77, 69, 60, 18,
93, 18, 67, 216, 131, 178, 175, 153, 212, 128, 25, 234, 172,
214, 215, 121, 0, 101, 163, 114, 213, 107, 8};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_pad_0_test)
{
// https:xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
migraphx::program p = migraphx::parse_onnx("qlinearconv_pad_0_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
std::vector<uint8_t> x_data = {0, 11, 21, 32, 42, 53, 64, 74, 85, 96, 106, 117, 128,
138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};
migraphx::parameter_map pp;
pp["X"] = migraphx::argument(sx, x_data.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
// # (1, 1, 3, 3) output tensor
std::vector<int8_t> gold = {-43, -29, -15, 28, 42, 56, 99, 113, 127};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_pad_1_test)
{
// https:xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
migraphx::program p = migraphx::parse_onnx("qlinearconv_pad_1_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
std::vector<uint8_t> x_data = {0, 11, 21, 32, 42, 53, 64, 74, 85, 96, 106, 117, 128,
138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};
migraphx::parameter_map pp;
pp["X"] = migraphx::argument(sx, x_data.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
// # (1, 1, 5, 5) output tensor
std::vector<uint8_t> gold = {19, 33, 43, 52, 38, 52, 85, 99, 113, 80, 99, 156, 170,
184, 128, 146, 227, 241, 255, 175, 113, 175, 184, 194, 132};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_scale_1D_test)
{
// https:xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
migraphx::program p = migraphx::parse_onnx("qlinearconv_scale_1D_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
std::vector<uint8_t> x_data = {0, 11, 21, 32, 42, 53, 64, 74, 85, 96, 106, 117, 128,
138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};
migraphx::parameter_map pp;
pp["X"] = migraphx::argument(sx, x_data.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
// # (1, 2, 3, 3) output tensor
std::vector<int8_t> gold = {
-43, -29, -15, 28, 42, 56, 99, 113, 127, -43, -29, -15, 28, 42, 56, 99, 113, 127};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearglobalavgpool_test)
{
// github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md
// #com.microsoft.QLinearGlobalAveragePool
migraphx::program p = migraphx::parse_onnx("qlinearglobalavgpool_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape sh_x{migraphx::shape::uint8_type, {1, 3, 4, 4}};
std::vector<uint8_t> data_x = {160, 156, 152, 148, 144, 140, 136, 132, 124, 120, 116, 112,
108, 104, 100, 96, 64, 72, 80, 88, 96, 104, 112, 120,
136, 144, 152, 160, 168, 176, 184, 192, 120, 121, 122, 123,
124, 125, 126, 127, 129, 130, 131, 132, 133, 134, 135, 136};
migraphx::parameter_map pp;
pp["X"] = migraphx::argument(sh_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {64, 64, 64};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearmatmul_1D_test)
{
migraphx::program p = migraphx::parse_onnx("qlinearmatmul_1D_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape a{migraphx::shape::uint8_type, {8}};
std::vector<uint8_t> data_a = {2, 4, 6, 8, 10, 12, 14, 16};
migraphx::shape b{migraphx::shape::uint8_type, {8}};
std::vector<uint8_t> data_b = {126, 130, 124, 132, 122, 134, 120, 136};
migraphx::parameter_map pp;
pp["A"] = migraphx::argument(a, data_a.data());
pp["B"] = migraphx::argument(b, data_b.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {66};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearmatmul_2D_test)
{
migraphx::program p = migraphx::parse_onnx("qlinearmatmul_2D_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape a{migraphx::shape::uint8_type, {1, 8}};
std::vector<uint8_t> data_a = {2, 4, 6, 8, 10, 12, 14, 16};
migraphx::shape b{migraphx::shape::uint8_type, {8, 1}};
std::vector<uint8_t> data_b = {126, 130, 124, 132, 122, 134, 120, 136};
migraphx::parameter_map pp;
pp["A"] = migraphx::argument(a, data_a.data());
pp["B"] = migraphx::argument(b, data_b.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {66};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearmatmul_3D_test)
{
    // Batched quantized matmul; reference values from:
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearMatMul.html
    migraphx::program p = migraphx::parse_onnx("qlinearmatmul_3D_test.onnx");
    p.compile(migraphx::make_target("ref"));

    // Two identical batches of a 2x4 and a 4x3 operand.
    std::vector<uint8_t> a_values = {
        208, 236, 0, 238, 3, 214, 255, 29, 208, 236, 0, 238, 3, 214, 255, 29};
    std::vector<uint8_t> b_values = {152, 51, 244, 60, 26, 255, 0, 127, 246, 127, 254, 247,
                                     152, 51, 244, 60, 26, 255, 0, 127, 246, 127, 254, 247};
    migraphx::shape a_shape{migraphx::shape::uint8_type, {2, 2, 4}};
    migraphx::shape b_shape{migraphx::shape::uint8_type, {2, 4, 3}};

    migraphx::parameter_map params;
    params["A"] = migraphx::argument(a_shape, a_values.data());
    params["B"] = migraphx::argument(b_shape, b_values.data());

    auto result = p.eval(params).back();
    std::vector<uint8_t> output;
    result.visit([&](auto vals) { output.assign(vals.begin(), vals.end()); });

    const std::vector<uint8_t> gold = {168, 115, 255, 1, 66, 151, 168, 115, 255, 1, 66, 151};
    EXPECT(migraphx::verify::verify_rms_range(output, gold));
}
TEST_CASE(resize_downsample_f_test)
......@@ -1264,7 +1545,7 @@ TEST_CASE(resize_downsample_f_test)
std::vector<float> gold = {0.0f, 3.0f};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(resize_upsample_linear_ac_test)
......@@ -1299,7 +1580,7 @@ TEST_CASE(resize_upsample_linear_ac_test)
11.0f / 3,
4};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(resize_upsample_linear_test)
......@@ -1320,7 +1601,7 @@ TEST_CASE(resize_upsample_linear_test)
std::vector<float> gold = {
1, 1.25, 1.75, 2, 1.5, 1.75, 2.25, 2.5, 2.5, 2.75, 3.25, 3.5, 3, 3.25, 3.75, 4};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(resize_upsample_pf_test)
......@@ -1341,7 +1622,7 @@ TEST_CASE(resize_upsample_pf_test)
std::vector<float> gold = {1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2,
3, 3, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(reversesequence_4D_verify_test)
......@@ -1362,7 +1643,7 @@ TEST_CASE(reversesequence_4D_verify_test)
std::vector<float> gold = {
8.0, 9.0, 10.0, 11.0, 4.0, 5.0, 6.0, 7.0, 0.0, 1.0, 2.0, 3.0, 12.0, 13.0, 14.0, 15.0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(reversesequence_batch_verify_test)
......@@ -1383,7 +1664,7 @@ TEST_CASE(reversesequence_batch_verify_test)
std::vector<float> gold = {
0.0, 1.0, 2.0, 3.0, 5.0, 4.0, 6.0, 7.0, 10.0, 9.0, 8.0, 11.0, 15.0, 14.0, 13.0, 12.0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(reversesequence_time_verify_test)
......@@ -1404,7 +1685,7 @@ TEST_CASE(reversesequence_time_verify_test)
std::vector<float> gold = {
3.0, 6.0, 9.0, 12.0, 2.0, 5.0, 8.0, 13.0, 1.0, 4.0, 10.0, 14.0, 0.0, 7.0, 11.0, 15.0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(selu_test)
......@@ -1424,7 +1705,7 @@ TEST_CASE(selu_test)
std::vector<float> gold = {0.55, 1.05, 0, -0.10912, -0.149251, 6};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(size_verify_test)
......@@ -1458,7 +1739,7 @@ TEST_CASE(slice_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {2, 3};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(slice_5arg_test)
......@@ -1478,7 +1759,7 @@ TEST_CASE(slice_5arg_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {10, 11, 12, 13, 15, 16, 17, 18};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(slice_reverse_test)
......@@ -1498,7 +1779,7 @@ TEST_CASE(slice_reverse_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {14, 13, 12, 11, 19, 18, 17, 16};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(slice_step_test)
......@@ -1518,7 +1799,7 @@ TEST_CASE(slice_step_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {14, 12};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(softplus_test)
......@@ -1539,7 +1820,7 @@ TEST_CASE(softplus_test)
std::transform(
data.begin(), data.end(), gold.begin(), [](auto x) { return std::log1p(std::exp(x)); });
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(softsign_test)
......@@ -1560,7 +1841,7 @@ TEST_CASE(softsign_test)
std::transform(
data.begin(), data.end(), gold.begin(), [](auto x) { return x / (1.0 + std::abs(x)); });
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(upsample_test)
......@@ -1579,7 +1860,7 @@ TEST_CASE(upsample_test)
std::vector<float> gold = {1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2,
3, 3, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(where_test)
......@@ -1621,7 +1902,7 @@ TEST_CASE(where_test)
2.0f,
1.0f,
2.0f};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
std::vector<float> gen_trilu_test(const migraphx::shape& s, const migraphx::program& p)
......@@ -1646,7 +1927,7 @@ TEST_CASE(trilu_test)
std::vector<float> gold = {1, 2, 3, 4, 0, 6, 7, 8, 0, 0, 11, 12};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_batch_diff_k_test)
......@@ -1657,7 +1938,7 @@ TEST_CASE(trilu_batch_diff_k_test)
std::vector<float> gold = {0, 0, 3, 0, 0, 0, 0, 0, 9, 0, 0, 0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_lower_test)
......@@ -1668,7 +1949,7 @@ TEST_CASE(trilu_lower_test)
std::vector<float> gold = {0, 0, 0, 0, 5, 0, 0, 0, 9, 10, 0, 0};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_out_k_test)
......@@ -1679,7 +1960,7 @@ TEST_CASE(trilu_out_k_test)
std::vector<float> gold(12, 0);
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_row_one_test)
......@@ -1690,7 +1971,7 @@ TEST_CASE(trilu_row_one_test)
std::vector<float> gold = {0, 2, 3, 4};
EXPECT(migraphx::verify::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -24,7 +24,8 @@
#include <migraphx/program.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/permutation.hpp>
#include <migraphx/op/common.hpp>
#include <sstream>
#include <migraphx/make_op.hpp>
......@@ -81,6 +82,48 @@ void throws_shape(const migraphx::shape&, Ts...)
"An expected shape should not be passed to throws_shape function");
}
TEST_CASE(allocate_static)
{
    // "allocate" with a static "shape" attribute and no inputs produces that shape.
    migraphx::shape out_shape{migraphx::shape::float_type, {2, 3, 4}};
    expect_shape(out_shape, migraphx::make_op("allocate", {{"shape", to_value(out_shape)}}));
}
TEST_CASE(allocate_static_input_error)
{
    // NOTE(review): despite the "_error" name this uses expect_shape, not throws_shape —
    // presumably the extra dims input is tolerated when a static "shape" attribute is
    // present; confirm against the allocate op's compute_shape.
    migraphx::shape input{migraphx::shape::int64_type, {3}};
    migraphx::shape out_shape{migraphx::shape::float_type, {2, 3, 4}};
    expect_shape(out_shape, migraphx::make_op("allocate", {{"shape", to_value(out_shape)}}), input);
}
TEST_CASE(allocate_dyn)
{
    // With no "shape" attribute, a dims input of length 2 yields a rank-2 dynamic
    // output of the requested "buf_type" with unbounded {0, max} dimensions.
    migraphx::shape dims_input{migraphx::shape::int64_type, {2}};
    const auto unbounded = std::numeric_limits<std::size_t>::max();
    migraphx::shape::dynamic_dimension any_dim{0, unbounded};
    std::vector<migraphx::shape::dynamic_dimension> expected_dims = {any_dim, any_dim};
    migraphx::shape expected{migraphx::shape::float_type, expected_dims};
    expect_shape(expected,
                 migraphx::make_op("allocate", {{"buf_type", migraphx::shape::float_type}}),
                 dims_input);
}
TEST_CASE(allocate_dyn_with_shape_attr)
{
    // A dynamic "shape" attribute determines the output shape directly.
    // NOTE(review): the attribute has 5 dynamic dims while the dims input has length 4 —
    // presumably the input is ignored when the attribute is set; confirm.
    migraphx::shape input{migraphx::shape::int64_type, {4}};
    migraphx::shape shape_attr{migraphx::shape::float_type,
                               {{1, 4}, {3, 3}, {4, 8, {4, 6}}, {4, 8}, {4, 6}}};
    expect_shape(shape_attr,
                 migraphx::make_op("allocate", {{"shape", migraphx::to_value(shape_attr)}}),
                 input);
}
TEST_CASE(allocate_dyn_no_input_error)
{
    // NOTE(review): named "_error" yet asserts the expected shape with expect_shape
    // rather than throws_shape — a dynamic "shape" attribute with no inputs is
    // apparently accepted; verify the intended behavior.
    migraphx::shape shape_attr{migraphx::shape::float_type,
                               {{1, 4}, {3, 3}, {4, 8, {4, 6}}, {4, 8}, {4, 6}}};
    expect_shape(shape_attr,
                 migraphx::make_op("allocate", {{"shape", migraphx::to_value(shape_attr)}}));
}
TEST_CASE(argmax_axis0)
{
migraphx::shape input{migraphx::shape::half_type, {2, 3, 4, 5}};
......@@ -156,13 +199,13 @@ TEST_CASE(broadcast)
{
std::vector<std::size_t> lens{1, 1};
migraphx::shape input{migraphx::shape::float_type, {2}};
throws_shape(migraphx::op::broadcast{1, lens}, input);
throws_shape(migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", lens}}), input);
}
{
std::vector<std::size_t> lens{2, 2};
migraphx::shape input{migraphx::shape::float_type, {1, 2}};
throws_shape(migraphx::op::broadcast{1, lens}, input);
throws_shape(migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", lens}}), input);
}
{
......@@ -862,6 +905,50 @@ TEST_CASE(flatten_dyn_axis4)
input);
}
TEST_CASE(fill_static_int)
{
    // fill(default_value, data): the output shape is taken from the data argument.
    migraphx::shape scalar_value{migraphx::shape::int64_type, {1}, {0}};
    migraphx::shape data_shape{migraphx::shape::int64_type, {3, 4, 4}};
    migraphx::shape expected{migraphx::shape::int64_type, {3, 4, 4}};
    expect_shape(expected, migraphx::make_op("fill"), scalar_value, data_shape);
}
TEST_CASE(fill_static_float)
{
    // Same as fill_static_int but for float data: output shape mirrors the data input.
    migraphx::shape scalar_value{migraphx::shape::float_type, {1}, {0}};
    migraphx::shape data_shape{migraphx::shape::float_type, {4, 8}};
    migraphx::shape expected{migraphx::shape::float_type, {4, 8}};
    expect_shape(expected, migraphx::make_op("fill"), scalar_value, data_shape);
}
TEST_CASE(fill_dyn_int)
{
    // fill with a dynamic-shaped data input: the dynamic dims pass through unchanged.
    migraphx::shape default_value{migraphx::shape::int64_type, {1}, {0}};
    migraphx::shape data{migraphx::shape::int64_type,
                         {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}};
    expect_shape(migraphx::shape{migraphx::shape::int64_type,
                                 {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}},
                 migraphx::make_op("fill"),
                 default_value,
                 data);
}
TEST_CASE(fill_dyn_float)
{
    // Same as fill_dyn_int for float: dynamic data dims are forwarded to the output.
    migraphx::shape default_value{migraphx::shape::float_type, {1}, {0}};
    migraphx::shape data{migraphx::shape::float_type,
                         {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}};
    expect_shape(migraphx::shape{migraphx::shape::float_type,
                                 {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}},
                 migraphx::make_op("fill"),
                 default_value,
                 data);
}
TEST_CASE(gather)
{
{
......@@ -1252,36 +1339,45 @@ TEST_CASE(inconsistent_attr_shape)
input);
}
template <class T>
void test_softmax_variations()
void test_softmax_variations(const std::string& name)
{
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{0}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
migraphx::make_op(name, {{"axis", 0}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{1}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
migraphx::make_op(name, {{"axis", 1}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{2}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
migraphx::make_op(name, {{"axis", 2}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{3}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
migraphx::make_op(name, {{"axis", 3}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
int axis = 4;
throws_shape(T{axis}, input);
throws_shape(migraphx::make_op(name, {{"axis", axis}}), input);
}
}
TEST_CASE(logsoftmax) { test_softmax_variations<migraphx::op::logsoftmax>(); }
TEST_CASE(logsoftmax) { test_softmax_variations("logsoftmax"); }
TEST_CASE(softmax) { test_softmax_variations("softmax"); }
TEST_CASE(lstm)
{
......@@ -2106,6 +2202,13 @@ TEST_CASE(pooling_shape3)
input);
}
TEST_CASE(pooling_shape4)
{
    // A rank-2 input (no spatial dimensions beyond batch/channel) must be rejected
    // by the pooling shape computation.
    migraphx::shape tiny_input{migraphx::shape::float_type, {4, 1}};
    throws_shape(migraphx::make_op("pooling", {{"mode", migraphx::op::pooling_mode::max}}),
                 tiny_input);
}
TEST_CASE(pooling_dyn_shape0)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {3, 3, {3}}, {3, 3, {3}}, {3, 3}}};
......@@ -2216,6 +2319,20 @@ TEST_CASE(prefix_scan_sum_dyn_2d)
}
}
TEST_CASE(random_uniform)
{
    // Output shape equals the second input's (dynamic) shape; the first input is a
    // uint64 scalar — presumably the RNG seed, matching random_seed below (confirm).
    std::vector<migraphx::shape::dynamic_dimension> dd{{5, 8}, {3, 7}};
    migraphx::shape s0{migraphx::shape::uint64_type, {1}};
    migraphx::shape s1{migraphx::shape::float_type, dd};
    expect_shape(s1, migraphx::make_op("random_uniform"), s0, s1);
}
TEST_CASE(random_seed)
{
    // random_seed takes no inputs and yields a uint64 scalar (stride-0, one element).
    migraphx::shape s{migraphx::shape::uint64_type, {1}, {0}};
    expect_shape(s, migraphx::make_op("random_seed"));
}
TEST_CASE(quant_convolution_shape)
{
migraphx::shape output{migraphx::shape::int32_type, {4, 4, 1, 1}};
......@@ -2328,47 +2445,54 @@ TEST_CASE(dqlinear_mismatch_type)
throws_shape(migraphx::make_op("dequantizelinear"), input, scales, zeros);
}
template <class T>
void test_reduce_ops()
void test_reduce_ops(const std::string& name)
{
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}}, T{}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}},
migraphx::make_op(name),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(
migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}}, T{{0, 1, 2, 3}}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 1}},
migraphx::make_op(name, {{"axes", {0, 1, 2, 3}}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 1, 1}}, T{{2, 3}}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 1, 1}},
migraphx::make_op(name, {{"axes", {2, 3}}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 3, 4, 5}}, T{{0}}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 3, 4, 5}},
migraphx::make_op(name, {{"axes", {0}}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 1}}, T{{-1}}, input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 1}},
migraphx::make_op(name, {{"axes", {-1}}}),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
throws_shape(T{{4}}, input);
throws_shape(migraphx::make_op(name, {{"axes", {4}}}), input);
}
}
// dynamic shape
template <class T>
void test_dyn_reduce_ops()
void test_dyn_reduce_ops(const std::string& name)
{
{
migraphx::shape input{migraphx::shape::float_type, {{2, 3, {3}}, {2, 4, {4}}}};
expect_shape(
migraphx::shape{migraphx::shape::float_type,
std::vector<migraphx::shape::dynamic_dimension>({{2, 3, {3}}, {1, 1}})},
T{{-1}},
migraphx::make_op(name, {{"axes", {-1}}}),
input);
}
{
......@@ -2376,7 +2500,7 @@ void test_dyn_reduce_ops()
expect_shape(
migraphx::shape{migraphx::shape::float_type,
std::vector<migraphx::shape::dynamic_dimension>({{1, 1}, {2, 4, {4}}})},
T{{0}},
migraphx::make_op(name, {{"axes", {0}}}),
input);
}
{
......@@ -2385,24 +2509,24 @@ void test_dyn_reduce_ops()
expect_shape(
migraphx::shape{migraphx::shape::float_type,
std::vector<migraphx::shape::dynamic_dimension>({{1, 1}, {1, 1}})},
T{{}},
migraphx::make_op(name),
input);
}
{
migraphx::shape input{migraphx::shape::float_type, {{2, 3, {3}}, {2, 4, {4}}}};
throws_shape(T{{4}}, input);
throws_shape(migraphx::make_op(name, {{"axes", {4}}}), input);
}
}
TEST_CASE(reduce_max) { test_reduce_ops<migraphx::op::reduce_max>(); }
TEST_CASE(reduce_mean) { test_reduce_ops<migraphx::op::reduce_mean>(); }
TEST_CASE(reduce_prod) { test_reduce_ops<migraphx::op::reduce_prod>(); }
TEST_CASE(reduce_sum) { test_reduce_ops<migraphx::op::reduce_sum>(); }
TEST_CASE(reduce_max) { test_reduce_ops("reduce_max"); }
TEST_CASE(reduce_mean) { test_reduce_ops("reduce_mean"); }
TEST_CASE(reduce_prod) { test_reduce_ops("reduce_prod"); }
TEST_CASE(reduce_sum) { test_reduce_ops("reduce_sum"); }
TEST_CASE(reduce_max_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_max>(); }
TEST_CASE(reduce_mean_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_mean>(); }
TEST_CASE(reduce_prod_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_prod>(); }
TEST_CASE(reduce_sum_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_sum>(); }
TEST_CASE(reduce_max_dyn) { test_dyn_reduce_ops("reduce_max"); }
TEST_CASE(reduce_mean_dyn) { test_dyn_reduce_ops("reduce_mean"); }
TEST_CASE(reduce_prod_dyn) { test_dyn_reduce_ops("reduce_prod"); }
TEST_CASE(reduce_sum_dyn) { test_dyn_reduce_ops("reduce_sum"); }
TEST_CASE(reshape_shape)
{
......@@ -2415,13 +2539,21 @@ TEST_CASE(reshape_shape)
migraphx::shape output{migraphx::shape::float_type, lens};
expect_shape(output, migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}
}
TEST_CASE(reshape_shape_invalid)
{
    // Target dims that cannot represent a 24-element input (element-count mismatch
    // or multiple -1 wildcards) must make reshape's shape computation throw.
    migraphx::shape input{migraphx::shape::float_type, {24, 1, 1, 1}};
    const std::vector<std::vector<int64_t>> bad_dims = {
        {8, 3, 2, 2}, {1, 3, -1, -1}, {3, 0}, {3, 2}};
    for(const auto& dims : bad_dims)
    {
        throws_shape(migraphx::make_op("reshape", {{"dims", dims}}), input);
    }
}
TEST_CASE(reshape_shape_minus1_reshapes)
{
migraphx::shape input{migraphx::shape::float_type, {24, 1, 1, 1}};
std::vector<std::pair<std::vector<int64_t>, migraphx::shape>> minus1_tests{
{{2, -1, 3}, {migraphx::shape::float_type, {2, 4, 3}}},
{{0, -1, 0}, {migraphx::shape::float_type, {24, 1, 1}}},
......@@ -2545,14 +2677,14 @@ TEST_CASE(reshape_broadcast_squeeze)
expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_broadcast_squeeze_error)
TEST_CASE(reshape_broadcast_squeeze_memlayout_change)
{
migraphx::shape input{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
std::vector<int64_t> new_shape = {2, 16, 20480};
throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
migraphx::shape output{migraphx::shape::float_type, {2, 16, 256, 80}, {0, 0, 0, 16}};
expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_dyn_shape)
TEST_CASE(reshape_dyn_1in)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
for(auto&& new_shape : std::vector<std::vector<int64_t>>{
......@@ -2576,6 +2708,27 @@ TEST_CASE(reshape_dyn_shape)
}
}
TEST_CASE(reshape_dyn_2in_0)
{
    // Two-input reshape (no "dims" attribute): the output shape is taken from the
    // second input's dynamic shape.
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    migraphx::shape output{migraphx::shape::float_type, {{1, 4}, {8, 8}, {3, 3}, {1, 1}}};
    expect_shape(output, migraphx::make_op("reshape"), input, output);
}
TEST_CASE(reshape_dyn_2in_1)
{
    // Two-input reshape where the target reorders the total element budget; output
    // still mirrors the second input's dynamic shape.
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    migraphx::shape output{migraphx::shape::float_type, {{12, 12}, {2, 2}, {1, 1}, {1, 4}}};
    expect_shape(output, migraphx::make_op("reshape"), input, output);
}
TEST_CASE(reshape_dyn_2in_2)
{
    // Two-input reshape with a static first input and a dynamic second input:
    // the dynamic target shape is used as the output.
    migraphx::shape input{migraphx::shape::float_type, {2, 24, 1, 1}};
    migraphx::shape output{migraphx::shape::float_type, {{1, 2}, {6, 12}, {1, 1}, {4, 4}}};
    expect_shape(output, migraphx::make_op("reshape"), input, output);
}
TEST_CASE(reshape_multiple_non_fixed_error)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {10, 20}, {1, 1}}};
......@@ -2597,6 +2750,199 @@ TEST_CASE(reshape_non_fixed_not_matching_error)
throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}
TEST_CASE(reshape_lazy_shape)
{
    migraphx::shape input{migraphx::shape::float_type, {24, 1, 1, 1}};
    // Valid targets: same total element count as the 24-element input.
    // (The original list contained {1, 3, 4, 2} twice; the redundant entry is removed.)
    for(auto&& new_shape : std::vector<std::vector<int64_t>>{{8, 3, 1, 1}, {1, 3, 4, 2}})
    {
        std::vector<std::size_t> lens(new_shape.size());
        std::copy(new_shape.begin(), new_shape.end(), lens.begin());
        migraphx::shape output{migraphx::shape::float_type, lens};
        expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
    }
    // Invalid targets must throw: element-count mismatch or multiple -1 wildcards.
    for(auto&& new_shape :
        std::vector<std::vector<int64_t>>{{8, 3, 2, 2}, {1, 3, -1, -1}, {3, 0}, {3, 2}})
    {
        throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
    }
    // -1 infers a single dimension from the remaining element count;
    // 0 copies the corresponding input dimension (e.g. {0, -1, 0} -> {24, 1, 1}).
    std::vector<std::pair<std::vector<int64_t>, migraphx::shape>> minus1_tests{
        {{2, -1, 3}, {migraphx::shape::float_type, {2, 4, 3}}},
        {{0, -1, 0}, {migraphx::shape::float_type, {24, 1, 1}}},
        {{2, -1, 0}, {migraphx::shape::float_type, {2, 12, 1}}},
        {{0, 0, -1}, {migraphx::shape::float_type, {24, 1, 1}}},
        {{2, 0, -1}, {migraphx::shape::float_type, {2, 1, 12}}},
        {{-1, 2, 3}, {migraphx::shape::float_type, {4, 2, 3}}},
        {{-1, 0, 3}, {migraphx::shape::float_type, {8, 1, 3}}},
        {{-1, 0, 0}, {migraphx::shape::float_type, {24, 1, 1}}},
        {{-1, 3, 0}, {migraphx::shape::float_type, {8, 3, 1}}}};
    for(auto& it : minus1_tests)
    {
        expect_shape(it.second, migraphx::make_op("reshape_lazy", {{"dims", it.first}}), input);
    }
}
// This uses the permutation to compute the reshape_lazy since it's simpler than
// trying to calculate strides. As we collapse or expand dimensions, we
// remove the collapsed dimensions or duplicate the expanded dimensions in
// the permutation. Then we renumber the permutation. So for dimensions of 4,
// 24, 1, 1, 1 with a permutation of 1, 0, 2, 3, 4 that is lazily reshaped to 4,
// 1, 3, 4, 2, we first remove the collapsed dimensions or duplicate the expanded
// dimensions which gives 1, 0, 0, 0, 0. Then after renumbering we get a
// final permutation of 4, 0, 1, 2, 3.
TEST_CASE(reshape_lazy_nonstandard)
{
    // Input is a transposed (non-standard-layout) 5-D shape; see the comment above
    // for how each expected permutation is derived from the target dims.
    auto input = migraphx::shape::from_permutation(migraphx::shape::float_type,
                                                   {4, 24, 1, 1, 1},
                                                   migraphx::invert_permutation({1, 0, 2, 3, 4}));
    // Each pair is {target dims, expected permutation of the output shape}.
    std::vector<std::pair<std::vector<std::size_t>, std::vector<int64_t>>> tests{
        {{4, 24}, {1, 0}},
        {{4, 24, 1, 1, 1, 1}, {1, 0, 2, 3, 4, 5}},
        {{4, 8, 3, 1, 1}, {2, 0, 1, 3, 4}},
        {{4, 1, 3, 4, 2}, {4, 0, 1, 2, 3}},
        {{4, 1, 4, 3, 2}, {4, 0, 1, 2, 3}},
        {{4, 2, 4, 3}, {3, 0, 1, 2}},
        {{4, 2, 12, 1}, {2, 0, 1, 3}},
        {{4, 2, 1, 12}, {3, 0, 1, 2}},
        {{4, 4, 2, 3}, {3, 0, 1, 2}},
        {{4, 8, 1, 3}, {3, 0, 1, 2}},
        {{4, 8, 3, 1}, {2, 0, 1, 3}}};
    for(const auto& [dims, perm] : tests)
    {
        migraphx::shape output = migraphx::shape::from_permutation(
            migraphx::shape::float_type, dims, migraphx::invert_permutation(perm));
        expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", dims}}), input);
    }
}
TEST_CASE(reshape_lazy_nonstandard_squeeze)
{
    // Collapsing the two middle dims (16x16 -> 256) of an NHWC-permuted input keeps
    // the corresponding output permutation ({0, 2, 1}) rather than a standard layout.
    auto input = migraphx::shape::from_permutation(
        migraphx::shape::float_type, {2, 16, 16, 1280}, migraphx::invert_permutation({0, 2, 3, 1}));
    std::vector<std::size_t> lens = {2, 256, 1280};
    migraphx::shape output = migraphx::shape::from_permutation(
        migraphx::shape::float_type, lens, migraphx::invert_permutation({0, 2, 1}));
    expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", lens}}), input);
}
TEST_CASE(reshape_lazy_nonstandard_error)
{
    // On a transposed input, targets that change the element count, need multiple
    // -1 wildcards, or would require splitting across the permuted axis must throw.
    auto input = migraphx::shape::from_permutation(migraphx::shape::float_type,
                                                   {4, 24, 1, 1, 1},
                                                   migraphx::invert_permutation({1, 0, 2, 3, 4}));
    for(auto&& new_shape : std::vector<std::vector<int64_t>>{{4, 8, 3, 2, 2},
                                                             {1},
                                                             {4, 8, 4},
                                                             {4, 24, 1, 1, 1, 1, 2},
                                                             {8, 4, 4},
                                                             {4, 1, 3, -1, -1},
                                                             {4, 3, 0},
                                                             {4, 3, 2},
                                                             {3, 0},
                                                             {3, 2}})
    {
        throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
    }
}
TEST_CASE(reshape_lazy_nonpacked_unsqueeze1)
{
    // Splitting dim 16 -> 2x8 of a non-packed input (strides {32, 2}) derives the new
    // strides {32, 16, 2} without copying.
    migraphx::shape input{migraphx::shape::float_type, {4, 16}, {32, 2}};
    migraphx::shape output{migraphx::shape::float_type, {4, 2, 8}, {32, 16, 2}};
    expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_nonpacked_unsqueeze2)
{
    // Splitting the leading dim 4 -> 2x2 of a non-packed input scales the leading
    // stride accordingly ({32, 2} -> {64, 32, 2}).
    migraphx::shape input{migraphx::shape::float_type, {4, 16}, {32, 2}};
    migraphx::shape output{migraphx::shape::float_type, {2, 2, 16}, {64, 32, 2}};
    expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_nonpacked_squeeze)
{
    // Collapsing 4x16 -> 64 is possible lazily here because stride 32 == 16 * 2,
    // so the flattened view keeps the element stride of 2.
    migraphx::shape input{migraphx::shape::float_type, {4, 16}, {32, 2}};
    migraphx::shape output{migraphx::shape::float_type, {64}, {2}};
    expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_broadcast_unsqueeze1)
{
    // Splitting a zero-stride (broadcast) dim 256 -> 16x16 keeps both new dims
    // at stride 0.
    migraphx::shape input{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
    migraphx::shape output{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
    expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_broadcast_unsqueeze2)
{
    // Splitting the non-broadcast dim 1280 -> 16x80 derives real strides {80, 1}
    // while the broadcast dims stay at stride 0.
    migraphx::shape input{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
    migraphx::shape output{migraphx::shape::float_type, {2, 256, 16, 80}, {0, 0, 80, 1}};
    expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_broadcast_squeeze)
{
    // Merging two zero-stride dims (16x16 -> 256) of a broadcast shape keeps the
    // merged dim at stride 0.
    migraphx::shape input{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
    migraphx::shape output{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
    expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_broadcast_squeeze_error)
{
    // Merging a zero-stride dim (16) with the stride-1 dim (1280) cannot be expressed
    // as a view, so reshape_lazy must throw.
    migraphx::shape input{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
    std::vector<int64_t> new_shape = {2, 16, 20480};
    throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
TEST_CASE(reshape_lazy_dyn_shape)
{
    // Dynamic input: dims entries of 0 or -1 carry the input's dynamic_dimension
    // through to the output; positive entries become fixed {d, d} dims.
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    for(auto&& new_shape : std::vector<std::vector<int64_t>>{
            {-1, 1, 1, 24}, {0, 8, 3, 1}, {-1, 3, 4, 2}, {0, 2, 4, 3}})
    {
        // Build the expected dynamic output shape by the same 0/-1 substitution rule.
        std::vector<migraphx::shape::dynamic_dimension> out_dyn_dims{};
        for(std::size_t i = 0; i < new_shape.size(); ++i)
        {
            if(new_shape[i] == 0 or new_shape[i] == -1)
            {
                out_dyn_dims.push_back(input.dyn_dims().at(i));
            }
            else
            {
                std::size_t d = new_shape[i];
                out_dyn_dims.push_back({d, d});
            }
        }
        migraphx::shape output{migraphx::shape::float_type, out_dyn_dims};
        expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
    }
}
TEST_CASE(reshape_lazy_multiple_non_fixed_error)
{
    // Two non-fixed dynamic dims ({1,4} and {10,20}) cannot both be carried through;
    // reshape_lazy must throw.
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {10, 20}, {1, 1}}};
    std::vector<int64_t> new_shape = {0, 1, 0, 24};
    throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
TEST_CASE(reshape_lazy_fixed_ele_not_matching_error)
{
    // The fixed part of the target (1 * 5 * 24) does not match the fixed input
    // elements (24 * 10), so reshape_lazy must throw.
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {10, 10}, {1, 1}}};
    std::vector<int64_t> new_shape = {0, 1, 5, 24};
    throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
TEST_CASE(reshape_lazy_non_fixed_not_matching_error)
{
    // The non-fixed input dim {1, 4} has no corresponding 0/-1 entry in the target
    // dims (all are fixed), so reshape_lazy must throw.
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    std::vector<int64_t> new_shape = {2, 1, 1, 24};
    throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
TEST_CASE(return_shape_tuple)
{
using migraphx::shape;
......@@ -2822,7 +3168,7 @@ TEST_CASE(select_module_dyn)
input);
}
TEST_CASE(slice_shape)
TEST_CASE(slice_static_shape)
{
migraphx::shape input{migraphx::shape::int32_type, {2, 2, 3}};
expect_shape(migraphx::shape{migraphx::shape::int32_type, {2, 2, 2}, {6, 3, 1}},
......@@ -2840,6 +3186,67 @@ TEST_CASE(slice_shape)
input);
}
TEST_CASE(slice_var_inputs_static_shape0)
{
    // Variable starts/ends inputs with a fixed "axes" attribute: the sliced axes
    // become dynamic {0, input_dim}; the untouched axis stays fixed {3, 3}.
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"axes", {1, 2}}}),
                 input,
                 starts,
                 ends);
}
TEST_CASE(slice_var_inputs_static_shape1)
{
    // With axes also passed as a variable input, any axis could be sliced, so every
    // output dim becomes dynamic {0, input_dim}.
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice"),
                 input,
                 starts,
                 ends,
                 axes);
}
TEST_CASE(slice_var_inputs_static_error0)
{
    // starts/ends have 2 elements but axes has 3 — mismatched lengths must throw.
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {3}};
    throws_shape(migraphx::make_op("slice"), input, starts, ends, axes);
}
TEST_CASE(slice_var_inputs_dyn_shape0)
{
    // Dynamic input with variable starts/ends and fixed axes {1, 2}: sliced dims
    // widen to {0, max}; the unsliced dim {3, 6} is preserved.
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {2, 4, {2, 4}}, {2, 4, {2, 4}}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"axes", {1, 2}}}),
                 input,
                 starts,
                 ends);
}
TEST_CASE(slice_var_inputs_dyn_shape1)
{
    // Dynamic input with all of starts/ends/axes variable: every dim widens to
    // {0, max of that dim}.
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {2, 4, {2, 4}}, {2, 4, {2, 4}}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice"),
                 input,
                 starts,
                 ends,
                 axes);
}
TEST_CASE(slice_dyn_shape0)
{
migraphx::shape input{migraphx::shape::int32_type, {{2, 3}, {7, 7}, {2, 3}}};
......@@ -2870,7 +3277,7 @@ TEST_CASE(slice_dyn_shape2)
TEST_CASE(slice_dyn_shape3)
{
// TODO: When variable dimension slicing is allowed, Slice to a size smaller than min.
// TODO: When non-fixed dimension slicing is allowed, Slice to a size smaller than min.
// Until then, this action is an error.
migraphx::shape input{migraphx::shape::int32_type, {{2, 3}, {7, 8}, {2, 3}}};
throws_shape(migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {1}}}),
......@@ -2901,8 +3308,6 @@ TEST_CASE(slice_dyn_shape5)
input);
}
TEST_CASE(softmax) { test_softmax_variations<migraphx::op::softmax>(); }
TEST_CASE(softmax_dyn0)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {3, 3}, {4, 4}, {5, 5}}};
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/literal.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/module.hpp>
#include <migraphx/optimize_module.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/serialize.hpp>
#include <test.hpp>
// Apply the full optimize_module pipeline to the module in place.
void run_pass(migraphx::module& m)
{
    migraphx::run_passes(m, {migraphx::optimize_module{}});
}
TEST_CASE(broadcast_transpose_inner_broadcast)
{
    // The pass first rewrites broadcast+transpose into a single broadcast,
    // then the inner-broadcast rewrite turns the pattern into mul+broadcast.
    migraphx::module before;
    {
        auto x = before.add_parameter("x", {migraphx::shape::float_type, {1}, {0}});
        auto y = before.add_parameter("y", {migraphx::shape::float_type, {1}, {0}});
        auto x_bcast =
            before.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 2, 3}}}), x);
        auto y_bcast =
            before.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 3, 2}}}), y);
        auto x_perm =
            before.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), x_bcast);
        auto prod = before.add_instruction(migraphx::make_op("mul"), y_bcast, x_perm);
        before.add_return({prod});
    }
    run_pass(before);

    // Expected: multiply the scalars first and broadcast once afterwards.
    migraphx::module expected;
    {
        auto x    = expected.add_parameter("x", {migraphx::shape::float_type, {1}, {0}});
        auto y    = expected.add_parameter("y", {migraphx::shape::float_type, {1}, {0}});
        auto prod = expected.add_instruction(migraphx::make_op("mul"), y, x);
        auto out  =
            expected.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 3, 2}}}), prod);
        expected.add_return({out});
    }
    EXPECT(before == expected);
}
TEST_CASE(broadcast_transpose_inner_broadcast_generic)
{
    // Here broadcast+transpose first becomes unsqueeze+transpose+broadcast,
    // then the inner-broadcast rewrite hoists the mul above the outer broadcast.
    migraphx::module before;
    {
        auto x = before.add_parameter("x", {migraphx::shape::float_type, {5, 10}});
        auto y = before.add_parameter("y", {migraphx::shape::float_type, {5}});
        auto x_bcast = before.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {3, 5, 10}}}), x);
        auto y_bcast = before.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {3, 10, 5}}}), y);
        auto y_perm = before.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), y_bcast);
        auto prod = before.add_instruction(migraphx::make_op("mul"), x_bcast, y_perm);
        before.add_return({prod});
    }
    run_pass(before);

    migraphx::module expected;
    {
        auto x      = expected.add_parameter("x", {migraphx::shape::float_type, {5, 10}});
        auto y      = expected.add_parameter("y", {migraphx::shape::float_type, {5}});
        auto y_unsq = expected.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0, 1}}}), y);
        auto y_perm = expected.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), y_unsq);
        auto x_bcast = expected.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {1, 5, 10}}}), x);
        auto y_bcast = expected.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {1, 5, 10}}}), y_perm);
        auto prod = expected.add_instruction(migraphx::make_op("mul"), x_bcast, y_bcast);
        auto out  = expected.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {3, 5, 10}}}), prod);
        expected.add_return({out});
    }
    EXPECT(before == expected);
}
// Test driver entry point: run all registered TEST_CASEs.
int main(int argc, const char* argv[])
{
    test::run(argc, argv);
}
......@@ -22,7 +22,6 @@
* THE SOFTWARE.
*/
#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/pad_calc.hpp>
#include "test.hpp"
......
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
......@@ -23,8 +23,14 @@
#####################################################################################
include(PythonModules)
# Per-interpreter virtual environments used as CTest fixtures: VENV for the
# regular python tests, VENV_ONNX for the onnx backend test suite.
set(VENV ${CMAKE_BINARY_DIR}/test/py/venv)
set(VENV_ONNX ${CMAKE_BINARY_DIR}/test/py/venv-onnx)
# Requirements files installed into the matching virtual environment.
set(REQUIREMENTS ${CMAKE_CURRENT_SOURCE_DIR}/requirements.txt)
set(REQUIREMENTS_ONNX ${CMAKE_CURRENT_SOURCE_DIR}/requirements-onnx.txt)
# Interpreter version for which the onnx fixture/tests are not registered
# (see the STREQUAL checks in the functions below).
set(PYTHON_VERSION_TO_DISABLE_ONNX 3.6)
# Registers CTest fixtures that create a python virtual environment per
# interpreter version and pip-install REQUIREMENTS_FILE into it.
#   FIXTURE_NAME     - fixture prefix ("common" or "onnx")
#   VIRTUAL_ENV_DIR  - root directory for the per-version venvs
#   REQUIREMENTS_FILE- requirements.txt to install
# Tests declaring FIXTURES_REQUIRED ${FIXTURE_NAME}_${ver}_VENV run after the
# install step. (This rewrite removes unmerged diff residue: a stale old
# function header, leftover pre-merge add_test/add_custom_target lines and an
# embedded "@@" hunk marker that made the region unparseable.)
function(add_py_venv_fixture FIXTURE_NAME VIRTUAL_ENV_DIR REQUIREMENTS_FILE)
    foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
        # The venv itself is created with the system interpreter.
        set(PYTHON_EXECUTABLE ${PYTHON_${PYTHON_VERSION}_EXECUTABLE})
        if(NOT TEST py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env)
            # The onnx fixture is skipped on the disabled python version.
            if (NOT (${FIXTURE_NAME} STREQUAL "onnx" AND ${PYTHON_VERSION} STREQUAL ${PYTHON_VERSION_TO_DISABLE_ONNX}))
                add_test(NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env COMMAND ${PYTHON_EXECUTABLE} -m venv ${VIRTUAL_ENV_DIR}/${PYTHON_VERSION} --clear)
                set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env PROPERTIES FIXTURES_SETUP ${FIXTURE_NAME}_${PYTHON_VERSION}_INIT_VENV)
                # Subsequent steps use the interpreter inside the venv.
                set(PYTHON_EXECUTABLE ${VIRTUAL_ENV_DIR}/${PYTHON_VERSION}/bin/python)
                add_test(
                    NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
                    COMMAND ${PYTHON_EXECUTABLE} -m pip install -r ${REQUIREMENTS_FILE})
                set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_INIT_VENV)
                set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env PROPERTIES FIXTURES_SETUP ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
            endif()
        endif()
    endforeach()
endfunction()
# Registers a python test (and a matching custom target) per interpreter
# version, run with the venv interpreter and gated on the venv fixture.
#   NAME         - short test name
#   SCRIPT       - python script relative to this source dir
#   FIXTURE_NAME - fixture prefix ("common" or "onnx")
#   VENV_DIR     - venv root created by add_py_venv_fixture
# Extra arguments (${ARGN}) are forwarded to the script invocation.
function(add_py_test NAME SCRIPT FIXTURE_NAME VENV_DIR)
    foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
        # Point PYTHONPATH at the built bindings and enable allocator debugging.
        set (ENV_COMMAND ${CMAKE_COMMAND} -E env
            "PYTHONPATH=$<TARGET_FILE_DIR:migraphx_pybind_${PYTHON_VERSION}>"
            "PYTHONMALLOC=debug"
            "MALLOC_CHECK_=3"
        )
        # Use the interpreter from the fixture's virtual environment.
        set(PYTHON_EXECUTABLE ${VENV_DIR}/${PYTHON_VERSION}/bin/python)
        # onnx tests are not registered for the disabled python version.
        if(NOT (${FIXTURE_NAME} STREQUAL "onnx" AND ${PYTHON_VERSION} STREQUAL ${PYTHON_VERSION_TO_DISABLE_ONNX}))
            add_test(
                NAME test_py_${PYTHON_VERSION}_${NAME}
                COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN})
            # Run only after the fixture has installed the requirements.
            set_tests_properties(test_py_${PYTHON_VERSION}_${NAME} PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
            # Convenience target to invoke the same script directly.
            add_custom_target(test_py_${PYTHON_VERSION}_${NAME}
                COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN}
                COMMENT "${PYTHON_EXECUTABLE} ${SCRIPT}")
        endif()
    endforeach()
endfunction()
add_dependencies(tests migraphx_py)
add_dependencies(check migraphx_py)

# One venv fixture per requirements set: "common" for the regular python
# tests and "onnx" for the onnx backend test suite.
# (This rewrite drops stale pre-merge duplicate add_py_test calls that used
# the old two-argument signature; under the new four-argument signature they
# would misparse and register duplicate test names.)
add_py_venv_fixture(common ${VENV} ${REQUIREMENTS})
add_py_venv_fixture(onnx ${VENV_ONNX} ${REQUIREMENTS_ONNX})

add_py_test(ref test_cpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(save_load test_save_load.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(op test_op.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(shape test_shape.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(module_construct test_module_construct.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(literal test_literal.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
if(MIGRAPHX_ENABLE_GPU)
    add_py_test(gpu_offload test_gpu_offload.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(gpu test_gpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(array test_array.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(backend onnx_backend_test.py onnx ${VENV_ONNX} WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(gpu_async test_gpu_async.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
endif()
......@@ -64,48 +64,1059 @@ class MIGraphXBackendTest(onnx.backend.test.BackendTest):
def disabled_tests_onnx_1_7_0(backend_test):
# fails
# from OnnxBackendNodeModelTest
backend_test.exclude(r'test_argmax_keepdims_example_select_last_index_cpu')
backend_test.exclude(
r'test_argmax_negative_axis_keepdims_example_select_last_index_cpu')
backend_test.exclude(
r'test_argmax_no_keepdims_example_select_last_index_cpu')
backend_test.exclude(r'test_argmin_keepdims_example_select_last_index_cpu')
backend_test.exclude(
r'test_argmin_negative_axis_keepdims_example_select_last_index_cpu')
backend_test.exclude(
r'test_argmin_no_keepdims_example_select_last_index_cpu')
backend_test.exclude(r'test_logsoftmax_axis_0_cpu')
backend_test.exclude(r'test_logsoftmax_axis_1_cpu')
backend_test.exclude(r'test_logsoftmax_default_axis_cpu')
backend_test.exclude(r'test_maxpool_2d_dilations_cpu')
backend_test.exclude(r'test_maxpool_with_argmax_2d_precomputed_pads_cpu')
backend_test.exclude(
r'test_maxpool_with_argmax_2d_precomputed_strides_cpu')
backend_test.exclude(r'test_nonmaxsuppression_center_point_box_format_cpu')
backend_test.exclude(r'test_nonmaxsuppression_flipped_coordinates_cpu')
backend_test.exclude(r'test_nonmaxsuppression_identical_boxes_cpu')
backend_test.exclude(r'test_nonmaxsuppression_limit_output_size_cpu')
backend_test.exclude(
r'test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu')
backend_test.exclude(r'test_nonmaxsuppression_suppress_by_IOU_cpu')
backend_test.exclude(r'test_nonmaxsuppression_two_batches_cpu')
backend_test.exclude(r'test_nonmaxsuppression_two_classes_cpu')
backend_test.exclude(r'test_nonzero_example_cpu')
backend_test.exclude(r'test_round_cpu')
backend_test.exclude(r'test_softmax_axis_0_cpu')
backend_test.exclude(r'test_softmax_axis_1_cpu')
backend_test.exclude(r'test_softmax_default_axis_cpu')
# from OnnxBackendPyTorchConvertedModelTest
backend_test.exclude(r'test_ConvTranspose2d_cpu')
backend_test.exclude(r'test_ConvTranspose2d_no_bias_cpu')
def disabled_tests_onnx_1_8_1(backend_test):
backend_test.exclude(r'test_if_seq_cpu')
# from OnnxBackendPyTorchOperatorModelTest
backend_test.exclude(r'test_operator_add_broadcast_cpu')
backend_test.exclude(r'test_operator_add_size1_right_broadcast_cpu')
backend_test.exclude(r'test_operator_addconstant_cpu')
backend_test.exclude(r'test_operator_convtranspose_cpu')
# errors
# from OnnxBackendNodeModelTest
backend_test.exclude(r'test_bitshift_left_uint16_cpu')
backend_test.exclude(r'test_bitshift_left_uint32_cpu')
backend_test.exclude(r'test_bitshift_left_uint64_cpu')
backend_test.exclude(r'test_bitshift_left_uint8_cpu')
backend_test.exclude(r'test_bitshift_right_uint16_cpu')
backend_test.exclude(r'test_bitshift_right_uint32_cpu')
backend_test.exclude(r'test_bitshift_right_uint64_cpu')
backend_test.exclude(r'test_bitshift_right_uint8_cpu')
backend_test.exclude(r'test_cast_FLOAT_to_STRING_cpu')
backend_test.exclude(r'test_cast_STRING_to_FLOAT_cpu')
backend_test.exclude(r'test_compress_0_cpu')
backend_test.exclude(r'test_compress_1_cpu')
backend_test.exclude(r'test_compress_default_axis_cpu')
backend_test.exclude(r'test_compress_negative_axis_cpu')
backend_test.exclude(r'test_constant_pad_cpu')
backend_test.exclude(r'test_convinteger_with_padding_cpu')
backend_test.exclude(r'test_convtranspose_1d_cpu')
backend_test.exclude(r'test_det_2d_cpu')
backend_test.exclude(r'test_det_nd_cpu')
backend_test.exclude(r'test_dynamicquantizelinear_cpu')
backend_test.exclude(r'test_dynamicquantizelinear_max_adjusted_cpu')
backend_test.exclude(r'test_dynamicquantizelinear_min_adjusted_cpu')
backend_test.exclude(r'test_edge_pad_cpu')
backend_test.exclude(r'test_einsum_batch_diagonal_cpu')
backend_test.exclude(r'test_einsum_batch_matmul_cpu')
backend_test.exclude(r'test_einsum_inner_prod_cpu')
backend_test.exclude(r'test_einsum_sum_cpu')
backend_test.exclude(r'test_einsum_transpose_cpu')
backend_test.exclude(r'test_hardmax_axis_0_cpu')
backend_test.exclude(r'test_hardmax_axis_1_cpu')
backend_test.exclude(r'test_hardmax_axis_2_cpu')
backend_test.exclude(r'test_hardmax_default_axis_cpu')
backend_test.exclude(r'test_hardmax_example_cpu')
backend_test.exclude(r'test_hardmax_negative_axis_cpu')
backend_test.exclude(r'test_hardmax_one_hot_cpu')
backend_test.exclude(r'test_isinf_cpu')
backend_test.exclude(r'test_isinf_negative_cpu')
backend_test.exclude(r'test_isinf_positive_cpu')
backend_test.exclude(r'test_matmulinteger_cpu')
backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
backend_test.exclude(r'test_maxunpool_export_with_output_shape_cpu')
backend_test.exclude(r'test_maxunpool_export_without_output_shape_cpu')
backend_test.exclude(r'test_mod_mixed_sign_int32_cpu')
backend_test.exclude(r'test_mod_mixed_sign_int8_cpu')
backend_test.exclude(r'test_mvn_cpu')
backend_test.exclude(
r'test_negative_log_likelihood_loss_iinput_shape_is_NCd1_weight_ignore_index_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NC_cpu')
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1_cpu')
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1_ignore_index_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1_mean_weight_negative_ignore_index_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1_weight_cpu')
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_cpu')
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_no_weight_reduction_mean_ignore_index_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_reduction_mean_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_reduction_sum_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_mean_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_sum_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_sum_ignore_index_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3d4d5_mean_weight_cpu'
)
backend_test.exclude(
r'test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3d4d5_none_no_weight_cpu'
)
backend_test.exclude(r'test_qlinearconv_cpu')
backend_test.exclude(r'test_qlinearmatmul_2D_cpu')
backend_test.exclude(r'test_qlinearmatmul_3D_cpu')
backend_test.exclude(r'test_range_float_type_positive_delta_expanded_cpu')
backend_test.exclude(r'test_range_int32_type_negative_delta_expanded_cpu')
backend_test.exclude(r'test_reflect_pad_cpu')
backend_test.exclude(
r'test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu')
backend_test.exclude(
r'test_resize_downsample_scales_cubic_align_corners_cpu')
backend_test.exclude(r'test_resize_downsample_scales_cubic_cpu')
backend_test.exclude(
r'test_resize_downsample_scales_linear_align_corners_cpu')
backend_test.exclude(r'test_resize_downsample_scales_linear_cpu')
backend_test.exclude(r'test_resize_downsample_scales_nearest_cpu')
backend_test.exclude(r'test_resize_downsample_sizes_cubic_cpu')
backend_test.exclude(
r'test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu')
backend_test.exclude(r'test_resize_downsample_sizes_nearest_cpu')
backend_test.exclude(
r'test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu')
backend_test.exclude(r'test_resize_tf_crop_and_resize_cpu')
backend_test.exclude(
r'test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu')
backend_test.exclude(
r'test_resize_upsample_scales_cubic_align_corners_cpu')
backend_test.exclude(r'test_resize_upsample_scales_cubic_asymmetric_cpu')
backend_test.exclude(r'test_resize_upsample_scales_cubic_cpu')
backend_test.exclude(
r'test_resize_upsample_scales_linear_align_corners_cpu')
backend_test.exclude(r'test_resize_upsample_scales_linear_cpu')
backend_test.exclude(r'test_resize_upsample_scales_nearest_cpu')
backend_test.exclude(r'test_resize_upsample_sizes_cubic_cpu')
backend_test.exclude(
r'test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu')
backend_test.exclude(r'test_resize_upsample_sizes_nearest_cpu')
backend_test.exclude(
r'test_resize_upsample_sizes_nearest_floor_align_corners_cpu')
backend_test.exclude(
r'test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu')
backend_test.exclude(r'test_reversesequence_batch_cpu')
backend_test.exclude(r'test_reversesequence_time_cpu')
backend_test.exclude(r'test_scan9_sum_cpu')
backend_test.exclude(r'test_scan_sum_cpu')
backend_test.exclude(r'test_shrink_hard_cpu')
backend_test.exclude(r'test_shrink_soft_cpu')
backend_test.exclude(r'test_slice_cpu')
backend_test.exclude(r'test_slice_default_axes_cpu')
backend_test.exclude(r'test_slice_default_steps_cpu')
backend_test.exclude(r'test_slice_end_out_of_bounds_cpu')
backend_test.exclude(r'test_slice_neg_cpu')
backend_test.exclude(r'test_slice_neg_steps_cpu')
backend_test.exclude(r'test_slice_negative_axes_cpu')
backend_test.exclude(r'test_slice_start_out_of_bounds_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1_mean_weight_negative_ignore_index_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1_mean_weight_negative_ignore_index_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1_mean_weight_negative_ignore_index_log_prob_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1_mean_weight_negative_ignore_index_log_prob_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_log_prob_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_log_prob_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_log_prob_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_log_prob_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_mean_weight_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_mean_weight_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_mean_weight_log_prob_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_mean_weight_log_prob_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_none_no_weight_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_none_no_weight_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_none_no_weight_log_prob_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_none_no_weight_log_prob_expanded_cpu'
)
backend_test.exclude(r'test_softmax_cross_entropy_mean_3d_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_mean_3d_expanded_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_mean_3d_log_prob_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_3d_log_prob_expanded_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_mean_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_mean_expanded_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_mean_log_prob_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_log_prob_expanded_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_3d_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_3d_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_3d_log_prob_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_3d_log_prob_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_4d_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_4d_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_4d_log_prob_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_4d_log_prob_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_expanded_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_log_prob_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_no_weight_ignore_index_log_prob_expanded_cpu'
)
backend_test.exclude(r'test_softmax_cross_entropy_mean_weight_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_expanded_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_3d_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_3d_expanded_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_3d_log_prob_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_3d_log_prob_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_4d_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_4d_expanded_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_4d_log_prob_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_4d_log_prob_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_expanded_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_log_prob_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_ignore_index_log_prob_expanded_cpu'
)
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_log_prob_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_mean_weight_log_prob_expanded_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_none_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_none_expanded_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_none_log_prob_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_none_log_prob_expanded_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_none_weights_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_none_weights_expanded_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_none_weights_log_prob_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_none_weights_log_prob_expanded_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_sum_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_sum_expanded_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_sum_log_prob_cpu')
backend_test.exclude(
r'test_softmax_cross_entropy_sum_log_prob_expanded_cpu')
backend_test.exclude(r'test_split_zero_size_splits_cpu')
backend_test.exclude(
r'test_strnormalizer_export_monday_casesensintive_lower_cpu')
backend_test.exclude(
r'test_strnormalizer_export_monday_casesensintive_nochangecase_cpu')
backend_test.exclude(
r'test_strnormalizer_export_monday_casesensintive_upper_cpu')
backend_test.exclude(r'test_strnormalizer_export_monday_empty_output_cpu')
backend_test.exclude(
r'test_strnormalizer_export_monday_insensintive_upper_twodim_cpu')
backend_test.exclude(r'test_strnormalizer_nostopwords_nochangecase_cpu')
backend_test.exclude(
r'test_tfidfvectorizer_tf_batch_onlybigrams_skip0_cpu')
backend_test.exclude(
r'test_tfidfvectorizer_tf_batch_onlybigrams_skip5_cpu')
backend_test.exclude(
r'test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu')
backend_test.exclude(r'test_tfidfvectorizer_tf_only_bigrams_skip0_cpu')
backend_test.exclude(r'test_tfidfvectorizer_tf_onlybigrams_levelempty_cpu')
backend_test.exclude(r'test_tfidfvectorizer_tf_onlybigrams_skip5_cpu')
backend_test.exclude(r'test_tfidfvectorizer_tf_uniandbigrams_skip5_cpu')
backend_test.exclude(r'test_top_k_cpu')
backend_test.exclude(r'test_top_k_negative_axis_cpu')
backend_test.exclude(r'test_top_k_smallest_cpu')
backend_test.exclude(r'test_unique_not_sorted_without_axis_cpu')
backend_test.exclude(r'test_unique_sorted_with_axis_3d_cpu')
backend_test.exclude(r'test_unique_sorted_with_axis_cpu')
backend_test.exclude(r'test_unique_sorted_with_negative_axis_cpu')
backend_test.exclude(r'test_unique_sorted_without_axis_cpu')
backend_test.exclude(r'test_upsample_nearest_cpu')
# from OnnxBackendPyTorchConvertedModelTest
backend_test.exclude(r'test_PReLU_1d_multiparam_cpu')
backend_test.exclude(r'test_PReLU_2d_multiparam_cpu')
backend_test.exclude(r'test_PReLU_3d_multiparam_cpu')
backend_test.exclude(r'test_ReplicationPad2d_cpu')
# from OnnxBackendPyTorchOperatorModelTest
backend_test.exclude(r'test_operator_add_size1_broadcast_cpu')
backend_test.exclude(r'test_operator_add_size1_singleton_broadcast_cpu')
# from OnnxBackendSimpleModelTest
backend_test.exclude(r'test_gradient_of_add_and_mul_cpu')
backend_test.exclude(r'test_gradient_of_add_cpu')
backend_test.exclude(r'test_sequence_model1_cpu')
backend_test.exclude(r'test_sequence_model2_cpu')
backend_test.exclude(r'test_sequence_model3_cpu')
backend_test.exclude(r'test_sequence_model4_cpu')
backend_test.exclude(r'test_sequence_model5_cpu')
backend_test.exclude(r'test_sequence_model6_cpu')
backend_test.exclude(r'test_sequence_model7_cpu')
backend_test.exclude(r'test_sequence_model8_cpu')
backend_test.exclude(r'test_shrink_cpu')
backend_test.exclude(r'test_strnorm_model_monday_casesensintive_lower_cpu')
backend_test.exclude(
r'test_strnorm_model_monday_casesensintive_nochangecase_cpu')
backend_test.exclude(r'test_strnorm_model_monday_casesensintive_upper_cpu')
backend_test.exclude(r'test_strnorm_model_monday_empty_output_cpu')
backend_test.exclude(
r'test_strnorm_model_monday_insensintive_upper_twodim_cpu')
backend_test.exclude(r'test_strnorm_model_nostopwords_nochangecase_cpu')
def disabled_tests_onnx_1_8_0(backend_test):
# errors
# from OnnxBackendNodeModelTest
backend_test.exclude(r'test_cast_BFLOAT16_to_FLOAT_cpu')
backend_test.exclude(r'test_cast_FLOAT_to_BFLOAT16_cpu')
backend_test.exclude(r'test_if_seq_cpu')
backend_test.exclude(r'test_loop11_cpu')
backend_test.exclude(r'test_loop13_seq_cpu')
backend_test.exclude(r'test_nllloss_NC_cpu')
backend_test.exclude(r'test_nllloss_NCd1_cpu')
backend_test.exclude(r'test_nllloss_NCd1_ii_cpu')
backend_test.exclude(r'test_nllloss_NCd1_mean_weight_negative_ii_cpu')
backend_test.exclude(r'test_nllloss_NCd1_weight_cpu')
backend_test.exclude(r'test_nllloss_NCd1_weight_ii_cpu')
backend_test.exclude(r'test_nllloss_NCd1d2_cpu')
backend_test.exclude(
r'test_nllloss_NCd1d2_no_weight_reduction_mean_ii_cpu')
backend_test.exclude(r'test_nllloss_NCd1d2_reduction_mean_cpu')
backend_test.exclude(r'test_nllloss_NCd1d2_reduction_sum_cpu')
backend_test.exclude(r'test_nllloss_NCd1d2_with_weight_cpu')
backend_test.exclude(r'test_nllloss_NCd1d2_with_weight_reduction_mean_cpu')
backend_test.exclude(r'test_nllloss_NCd1d2_with_weight_reduction_sum_cpu')
backend_test.exclude(
r'test_nllloss_NCd1d2_with_weight_reduction_sum_ii_cpu')
backend_test.exclude(
r'test_nllloss_NCd1d2d3_none_no_weight_negative_ii_cpu')
backend_test.exclude(r'test_nllloss_NCd1d2d3_sum_weight_high_ii_cpu')
backend_test.exclude(r'test_nllloss_NCd1d2d3d4d5_mean_weight_cpu')
backend_test.exclude(r'test_nllloss_NCd1d2d3d4d5_none_no_weight_cpu')
backend_test.exclude(r'test_reduce_sum_empty_axes_input_noop_random_cpu')
backend_test.exclude(r'test_sce_NCd1_mean_weight_negative_ii_cpu')
backend_test.exclude(r'test_sce_NCd1_mean_weight_negative_ii_expanded_cpu')
backend_test.exclude(r'test_sce_NCd1_mean_weight_negative_ii_log_prob_cpu')
backend_test.exclude(
r'test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_NCd1d2d3_none_no_weight_negative_ii_cpu')
backend_test.exclude(
r'test_sce_NCd1d2d3_none_no_weight_negative_ii_expanded_cpu')
backend_test.exclude(
r'test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_cpu')
backend_test.exclude(
r'test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_NCd1d2d3_sum_weight_high_ii_cpu')
backend_test.exclude(r'test_sce_NCd1d2d3_sum_weight_high_ii_expanded_cpu')
backend_test.exclude(r'test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_cpu')
backend_test.exclude(
r'test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_NCd1d2d3d4d5_mean_weight_cpu')
backend_test.exclude(r'test_sce_NCd1d2d3d4d5_mean_weight_expanded_cpu')
backend_test.exclude(r'test_sce_NCd1d2d3d4d5_mean_weight_log_prob_cpu')
backend_test.exclude(
r'test_sce_NCd1d2d3d4d5_mean_weight_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_NCd1d2d3d4d5_none_no_weight_cpu')
backend_test.exclude(r'test_sce_NCd1d2d3d4d5_none_no_weight_expanded_cpu')
backend_test.exclude(r'test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_cpu')
backend_test.exclude(
r'test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_mean_3d_cpu')
backend_test.exclude(r'test_sce_mean_3d_expanded_cpu')
backend_test.exclude(r'test_sce_mean_3d_log_prob_cpu')
backend_test.exclude(r'test_sce_mean_3d_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_mean_cpu')
backend_test.exclude(r'test_sce_mean_expanded_cpu')
backend_test.exclude(r'test_sce_mean_log_prob_cpu')
backend_test.exclude(r'test_sce_mean_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_mean_no_weight_ii_3d_cpu')
backend_test.exclude(r'test_sce_mean_no_weight_ii_3d_expanded_cpu')
backend_test.exclude(r'test_sce_mean_no_weight_ii_3d_log_prob_cpu')
backend_test.exclude(
r'test_sce_mean_no_weight_ii_3d_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_mean_no_weight_ii_4d_cpu')
backend_test.exclude(r'test_sce_mean_no_weight_ii_4d_expanded_cpu')
backend_test.exclude(r'test_sce_mean_no_weight_ii_4d_log_prob_cpu')
backend_test.exclude(
r'test_sce_mean_no_weight_ii_4d_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_mean_no_weight_ii_cpu')
backend_test.exclude(r'test_sce_mean_no_weight_ii_expanded_cpu')
backend_test.exclude(r'test_sce_mean_no_weight_ii_log_prob_cpu')
backend_test.exclude(r'test_sce_mean_no_weight_ii_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_mean_weight_cpu')
backend_test.exclude(r'test_sce_mean_weight_expanded_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_3d_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_3d_expanded_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_3d_log_prob_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_3d_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_4d_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_4d_expanded_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_4d_log_prob_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_4d_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_expanded_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_log_prob_cpu')
backend_test.exclude(r'test_sce_mean_weight_ii_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_mean_weight_log_prob_cpu')
backend_test.exclude(r'test_sce_mean_weight_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_none_cpu')
backend_test.exclude(r'test_sce_none_expanded_cpu')
backend_test.exclude(r'test_sce_none_log_prob_cpu')
backend_test.exclude(r'test_sce_none_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_none_weights_cpu')
backend_test.exclude(r'test_sce_none_weights_expanded_cpu')
backend_test.exclude(r'test_sce_none_weights_log_prob_cpu')
backend_test.exclude(r'test_sce_none_weights_log_prob_expanded_cpu')
backend_test.exclude(r'test_sce_sum_cpu')
backend_test.exclude(r'test_sce_sum_expanded_cpu')
backend_test.exclude(r'test_sce_sum_log_prob_cpu')
backend_test.exclude(r'test_sce_sum_log_prob_expanded_cpu')
backend_test.exclude(r'test_sequence_insert_at_back_cpu')
backend_test.exclude(r'test_sequence_insert_at_front_cpu')
backend_test.exclude(r'test_split_variable_parts_1d_cpu')
backend_test.exclude(r'test_split_variable_parts_2d_cpu')
backend_test.exclude(r'test_split_variable_parts_default_axis_cpu')
def disabled_tests_onnx_1_9_0(backend_test):
    """Exclude ONNX 1.9.0 backend tests that fail or error on this backend."""
    # fails
    # from OnnxBackendNodeModelTest
    failing = (
        r'test_gru_batchwise_cpu',
        r'test_lstm_batchwise_cpu',
        r'test_simple_rnn_batchwise_cpu',
        r'test_tril_cpu',
        r'test_tril_one_row_neg_cpu',
        r'test_tril_square_cpu',
        # from OnnxBackendPyTorchConvertedModelTest
        r'test_MaxPool1d_stride_padding_dilation_cpu',
        r'test_MaxPool2d_stride_padding_dilation_cpu',
    )
    # errors
    # from OnnxBackendNodeModelTest
    erroring = (
        r'test_convinteger_without_padding_cpu',
        r'test_convtranspose_autopad_same_cpu',
        r'test_identity_sequence_cpu',
        r'test_tril_neg_cpu',
        r'test_tril_out_neg_cpu',
        r'test_tril_out_pos_cpu',
        r'test_tril_pos_cpu',
        r'test_tril_square_neg_cpu',
        r'test_tril_zero_cpu',
        r'test_triu_neg_cpu',
        r'test_triu_one_row_cpu',
        r'test_triu_out_neg_out_cpu',
        r'test_triu_out_pos_cpu',
        r'test_triu_pos_cpu',
        r'test_triu_square_neg_cpu',
        r'test_triu_zero_cpu',
    )
    for pattern in failing + erroring:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_10_0(backend_test):
    """Exclude ONNX 1.10.0 backend tests that fail or error on this backend.

    NOTE(review): a second `def disabled_tests_onnx_1_10_0` appears later in
    this file and shadows this one — confirm which definition is intended.
    """
    # fails
    # from OnnxBackendNodeModelTest
    failing = (
        r'test_bernoulli_double_expanded_cpu',
        r'test_bernoulli_expanded_cpu',
        r'test_bernoulli_seed_expanded_cpu',
    )
    # errors
    # from OnnxBackendNodeModelTest
    erroring = (
        r'test_bernoulli_cpu',
        r'test_bernoulli_double_cpu',
        r'test_bernoulli_seed_cpu',
        r'test_castlike_BFLOAT16_to_FLOAT_cpu',
        r'test_castlike_BFLOAT16_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT_to_BFLOAT16_cpu',
        r'test_castlike_FLOAT_to_BFLOAT16_expanded_cpu',
        r'test_castlike_FLOAT_to_STRING_cpu',
        r'test_castlike_FLOAT_to_STRING_expanded_cpu',
        r'test_castlike_STRING_to_FLOAT_cpu',
        r'test_castlike_STRING_to_FLOAT_expanded_cpu',
        r'test_optional_get_element_cpu',
        r'test_optional_get_element_sequence_cpu',
        r'test_optional_has_element_cpu',
        r'test_optional_has_element_empty_cpu',
    )
    for pattern in failing + erroring:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_11_0(backend_test):
    """Exclude ONNX 1.11.0 backend tests that crash, fail, or error."""
    # crash
    crashing = (r'test_scatter_elements_with_duplicate_indices_cpu', )
    # fails
    # from OnnxBackendNodeModelTest
    failing = (
        r'test_roialign_aligned_false_cpu',
        r'test_roialign_aligned_true_cpu',
        r'test_scatternd_add_cpu',
        r'test_scatternd_multiply_cpu',
    )
    # errors
    # from OnnxBackendNodeModelTest
    erroring = (
        r'test_gridsample_aligncorners_true_cpu',
        r'test_gridsample_bicubic_cpu',
        r'test_gridsample_bilinear_cpu',
        r'test_gridsample_border_padding_cpu',
        r'test_gridsample_cpu',
        r'test_gridsample_nearest_cpu',
        r'test_gridsample_reflection_padding_cpu',
        r'test_gridsample_zeros_padding_cpu',
        r'test_identity_opt_cpu',
        r'test_if_opt_cpu',
        r'test_loop16_seq_none_cpu',
    )
    for pattern in crashing + failing + erroring:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_12_0(backend_test):
    """Exclude ONNX 1.12.0 backend tests that error on this backend.

    NOTE(review): a second `def disabled_tests_onnx_1_12_0` appears later in
    this file and shadows this one — confirm which definition is intended.
    """
    # errors
    # from OnnxBackendNodeModelTest
    erroring = (
        r'test_blackmanwindow_cpu',
        r'test_blackmanwindow_expanded_cpu',
        r'test_blackmanwindow_symmetric_cpu',
        r'test_blackmanwindow_symmetric_expanded_cpu',
        r'test_dft_axis_cpu',
        r'test_dft_cpu',
        r'test_dft_inverse_cpu',
        r'test_hammingwindow_cpu',
        r'test_hammingwindow_expanded_cpu',
        r'test_hammingwindow_symmetric_cpu',
        r'test_hammingwindow_symmetric_expanded_cpu',
        r'test_hannwindow_cpu',
        r'test_hannwindow_expanded_cpu',
        r'test_hannwindow_symmetric_cpu',
        r'test_hannwindow_symmetric_expanded_cpu',
        r'test_layer_normalization_2d_axis0_cpu',
        r'test_layer_normalization_2d_axis1_cpu',
        r'test_layer_normalization_2d_axis_negative_1_cpu',
        r'test_layer_normalization_2d_axis_negative_2_cpu',
        r'test_layer_normalization_3d_axis0_epsilon_cpu',
        r'test_layer_normalization_3d_axis1_epsilon_cpu',
        r'test_layer_normalization_3d_axis2_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_1_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_2_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_3_epsilon_cpu',
        r'test_layer_normalization_4d_axis0_cpu',
        r'test_layer_normalization_4d_axis1_cpu',
        r'test_layer_normalization_4d_axis2_cpu',
        r'test_layer_normalization_4d_axis3_cpu',
        r'test_layer_normalization_4d_axis_negative_1_cpu',
        r'test_layer_normalization_4d_axis_negative_2_cpu',
        r'test_layer_normalization_4d_axis_negative_3_cpu',
        r'test_layer_normalization_4d_axis_negative_4_cpu',
        r'test_layer_normalization_default_axis_cpu',
        r'test_melweightmatrix_cpu',
        r'test_sequence_map_add_1_sequence_1_tensor_cpu',
        r'test_sequence_map_add_1_sequence_1_tensor_expanded_cpu',
        r'test_sequence_map_add_2_sequences_cpu',
        r'test_sequence_map_add_2_sequences_expanded_cpu',
        r'test_sequence_map_extract_shapes_cpu',
        r'test_sequence_map_extract_shapes_expanded_cpu',
        r'test_sequence_map_identity_1_sequence_1_tensor_cpu',
        r'test_sequence_map_identity_1_sequence_1_tensor_expanded_cpu',
        r'test_sequence_map_identity_1_sequence_cpu',
        r'test_sequence_map_identity_1_sequence_expanded_cpu',
        r'test_sequence_map_identity_2_sequences_cpu',
        r'test_sequence_map_identity_2_sequences_expanded_cpu',
        r'test_stft_cpu',
        r'test_stft_with_window_cpu',
    )
    for pattern in erroring:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_13_0(backend_test):
    """Exclude ONNX 1.13.0 backend tests that fail or error on this backend."""
    # fails
    # from OnnxBackendNodeModelTest
    failing = (
        r'test_reduce_l1_do_not_keepdims_example_cpu',
        r'test_reduce_l1_do_not_keepdims_random_cpu',
        r'test_reduce_l1_keep_dims_example_cpu',
        r'test_reduce_l1_keep_dims_random_cpu',
        r'test_reduce_l1_negative_axes_keep_dims_example_cpu',
        r'test_reduce_l1_negative_axes_keep_dims_random_cpu',
        r'test_reduce_l2_do_not_keepdims_example_cpu',
        r'test_reduce_l2_do_not_keepdims_random_cpu',
        r'test_reduce_l2_keep_dims_example_cpu',
        r'test_reduce_l2_keep_dims_random_cpu',
        r'test_reduce_l2_negative_axes_keep_dims_example_cpu',
        r'test_reduce_l2_negative_axes_keep_dims_random_cpu',
        r'test_reduce_log_sum_exp_do_not_keepdims_example_cpu',
        r'test_reduce_log_sum_exp_do_not_keepdims_random_cpu',
        r'test_reduce_log_sum_exp_keepdims_example_cpu',
        r'test_reduce_log_sum_exp_keepdims_random_cpu',
        r'test_reduce_log_sum_exp_negative_axes_keepdims_example_cpu',
        r'test_reduce_log_sum_exp_negative_axes_keepdims_random_cpu',
        r'test_reduce_sum_square_do_not_keepdims_example_cpu',
        r'test_reduce_sum_square_do_not_keepdims_random_cpu',
        r'test_reduce_sum_square_keepdims_example_cpu',
        r'test_reduce_sum_square_keepdims_random_cpu',
        r'test_reduce_sum_square_negative_axes_keepdims_example_cpu',
        r'test_reduce_sum_square_negative_axes_keepdims_random_cpu',
        r'test_scatternd_max_cpu',
        r'test_scatternd_min_cpu',
    )
    # errors
    # from OnnxBackendNodeModelTest
    erroring = (
        r'test_bitwise_and_i16_3d_cpu',
        r'test_bitwise_and_i32_2d_cpu',
        r'test_bitwise_and_ui64_bcast_3v1d_cpu',
        r'test_bitwise_and_ui8_bcast_4v3d_cpu',
        r'test_bitwise_not_2d_cpu',
        r'test_bitwise_not_3d_cpu',
        r'test_bitwise_not_4d_cpu',
        r'test_bitwise_or_i16_4d_cpu',
        r'test_bitwise_or_i32_2d_cpu',
        r'test_bitwise_or_ui64_bcast_3v1d_cpu',
        r'test_bitwise_or_ui8_bcast_4v3d_cpu',
        r'test_bitwise_xor_i16_3d_cpu',
        r'test_bitwise_xor_i32_2d_cpu',
        r'test_bitwise_xor_ui64_bcast_3v1d_cpu',
        r'test_bitwise_xor_ui8_bcast_4v3d_cpu',
        r'test_center_crop_pad_crop_and_pad_cpu',
        r'test_center_crop_pad_crop_and_pad_expanded_cpu',
        r'test_center_crop_pad_crop_axes_chw_cpu',
        r'test_center_crop_pad_crop_axes_chw_expanded_cpu',
        r'test_center_crop_pad_crop_axes_hwc_cpu',
        r'test_center_crop_pad_crop_axes_hwc_expanded_cpu',
        r'test_center_crop_pad_crop_cpu',
        r'test_center_crop_pad_crop_expanded_cpu',
        r'test_center_crop_pad_pad_cpu',
        r'test_center_crop_pad_pad_expanded_cpu',
        r'test_col2im_5d_cpu',
        r'test_col2im_cpu',
        r'test_col2im_dilations_cpu',
        r'test_col2im_pads_cpu',
        r'test_col2im_strides_cpu',
        r'test_constant_pad_axes_cpu',
        r'test_group_normalization_epsilon_cpu',
        r'test_group_normalization_epsilon_expanded_cpu',
        r'test_group_normalization_example_cpu',
        r'test_group_normalization_example_expanded_cpu',
        r'test_mish_cpu',
        r'test_mvn_expanded_ver18_cpu',
        r'test_optional_get_element_optional_sequence_cpu',
        r'test_optional_get_element_optional_tensor_cpu',
        r'test_optional_get_element_tensor_cpu',
        r'test_optional_has_element_empty_no_input_name_optional_input_cpu',
        r'test_optional_has_element_empty_no_input_name_tensor_input_cpu',
        r'test_optional_has_element_empty_no_input_optional_input_cpu',
        r'test_optional_has_element_empty_no_input_tensor_input_cpu',
        r'test_optional_has_element_empty_optional_input_cpu',
        r'test_optional_has_element_optional_input_cpu',
        r'test_optional_has_element_tensor_input_cpu',
        r'test_prelu_broadcast_expanded_cpu',
        r'test_prelu_example_expanded_cpu',
        r'test_reduce_l1_default_axes_keepdims_example_cpu',
        r'test_reduce_l1_default_axes_keepdims_random_cpu',
        r'test_reduce_l2_default_axes_keepdims_example_cpu',
        r'test_reduce_l2_default_axes_keepdims_random_cpu',
        r'test_reduce_log_sum_exp_default_axes_keepdims_example_cpu',
        r'test_reduce_log_sum_exp_default_axes_keepdims_random_cpu',
        r'test_reduce_sum_square_default_axes_keepdims_example_cpu',
        r'test_reduce_sum_square_default_axes_keepdims_random_cpu',
        r'test_resize_downsample_scales_cubic_antialias_cpu',
        r'test_resize_downsample_scales_linear_antialias_cpu',
        r'test_resize_downsample_sizes_cubic_antialias_cpu',
        r'test_resize_downsample_sizes_linear_antialias_cpu',
        r'test_resize_downsample_sizes_nearest_not_larger_cpu',
        r'test_resize_downsample_sizes_nearest_not_smaller_cpu',
        r'test_resize_tf_crop_and_resize_axes_2_3_cpu',
        r'test_resize_tf_crop_and_resize_axes_3_2_cpu',
        r'test_resize_upsample_scales_nearest_axes_2_3_cpu',
        r'test_resize_upsample_scales_nearest_axes_3_2_cpu',
        r'test_resize_upsample_sizes_nearest_axes_2_3_cpu',
        r'test_resize_upsample_sizes_nearest_axes_3_2_cpu',
        r'test_resize_upsample_sizes_nearest_not_larger_cpu',
        r'test_scatter_elements_with_reduction_max_cpu',
        r'test_scatter_elements_with_reduction_min_cpu',
        # The following tests fail due to the CastLike operator being
        # unsupported
        r'test_split_1d_uneven_split_opset18_cpu',
        r'test_split_2d_uneven_split_opset18_cpu',
    )
    for pattern in failing + erroring:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_14_0(backend_test):
    """Exclude ONNX 1.14.0 backend tests that fail or error on this backend."""
    # fails
    # from OnnxBackendNodeModelTest
    failing = (
        r'test_averagepool_2d_dilations_cpu',
        r'test_roialign_mode_max_cpu',
    )
    # errors
    # from OnnxBackendNodeModelTest
    erroring = (
        r'test_basic_deform_conv_with_padding_cpu',
        r'test_basic_deform_conv_without_padding_cpu',
        r'test_center_crop_pad_crop_negative_axes_hwc_cpu',
        r'test_center_crop_pad_crop_negative_axes_hwc_expanded_cpu',
        r'test_constant_pad_negative_axes_cpu',
        r'test_deform_conv_with_mask_bias_cpu',
        r'test_deform_conv_with_multiple_offset_groups_cpu',
        r'test_equal_string_broadcast_cpu',
        r'test_equal_string_cpu',
        r'test_lppool_1d_default_cpu',
        r'test_lppool_2d_default_cpu',
        r'test_lppool_2d_dilations_cpu',
        r'test_lppool_2d_pads_cpu',
        r'test_lppool_2d_same_lower_cpu',
        r'test_lppool_2d_same_upper_cpu',
        r'test_lppool_2d_strides_cpu',
        r'test_lppool_3d_default_cpu',
        r'test_resize_downsample_scales_linear_half_pixel_symmetric_cpu',
        r'test_resize_upsample_scales_linear_half_pixel_symmetric_cpu',
        # The following tests fail due to the CastLike operator being
        # unsupported
        r'test_softplus_example_expanded_ver18_cpu',
        r'test_softplus_expanded_ver18_cpu',
        r'test_split_to_sequence_1_cpu',
        r'test_split_to_sequence_2_cpu',
        r'test_split_to_sequence_nokeepdims_cpu',
        r'test_wrap_pad_cpu',
    )
    for pattern in failing + erroring:
        backend_test.exclude(pattern)
def disabled_tests_float8(backend_test):
    """Exclude float8 tests; the relevant proto tensor types are unsupported."""
    # e4m3fn (Prototensor data type 17 not supported)
    e4m3fn = (
        r'test_dequantizelinear_e4m3fn_cpu',
        r'test_quantizelinear_e4m3fn_cpu',
        r'test_cast_FLOAT16_to_FLOAT8E4M3FN_cpu',
        r'test_cast_FLOAT8E4M3FN_to_FLOAT16_cpu',
        r'test_cast_FLOAT8E4M3FN_to_FLOAT_cpu',
        r'test_cast_FLOAT_to_FLOAT8E4M3FN_cpu',
        r'test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN_cpu',
        r'test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E4M3FN_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded_cpu',
    )
    # e4m3fnuz (Prototensor data type 18 not supported)
    e4m3fnuz = (
        r'test_cast_FLOAT16_to_FLOAT8E4M3FNUZ_cpu',
        r'test_cast_FLOAT8E4M3FNUZ_to_FLOAT16_cpu',
        r'test_cast_FLOAT8E4M3FNUZ_to_FLOAT_cpu',
        r'test_cast_FLOAT_to_FLOAT8E4M3FNUZ_cpu',
        r'test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ_cpu',
        r'test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ_cpu',
        r'test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_cpu',
        r'test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT8E4M3FN_to_FLOAT_cpu',
        r'test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded_cpu',
    )
    # e5m2 ( Prototensor data type 19 not supported )
    e5m2 = (
        r'test_dequantizelinear_e5m2_cpu',
        r'test_quantizelinear_e5m2_cpu',
        r'test_cast_FLOAT16_to_FLOAT8E5M2_cpu',
        r'test_cast_FLOAT8E5M2_to_FLOAT16_cpu',
        r'test_cast_FLOAT8E5M2_to_FLOAT_cpu',
        r'test_cast_FLOAT_to_FLOAT8E5M2_cpu',
        r'test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2_cpu',
        r'test_cast_no_saturate_FLOAT_to_FLOAT8E5M2_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E5M2_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E5M2_expanded_cpu',
    )
    # e5m2fnuz (Prototensor data type 20 not supported)
    e5m2fnuz = (
        r'test_cast_FLOAT16_to_FLOAT8E5M2FNUZ_cpu',
        r'test_cast_FLOAT8E5M2FNUZ_to_FLOAT16_cpu',
        r'test_cast_FLOAT8E5M2FNUZ_to_FLOAT_cpu',
        r'test_cast_FLOAT_to_FLOAT8E5M2FNUZ_cpu',
        r'test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ_cpu',
        r'test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ_cpu',
        r'test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_cpu',
        r'test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT8E5M2_to_FLOAT_cpu',
        r'test_castlike_FLOAT8E5M2_to_FLOAT_expanded_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_cpu',
        r'test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded_cpu',
    )
    for pattern in e4m3fn + e4m3fnuz + e5m2 + e5m2fnuz:
        backend_test.exclude(pattern)
def disabled_tests_dynamic_shape(backend_test):
    """Exclude backend tests that are incompatible with dynamic-shape mode.

    Fix: the original body listed the seven `test_unsqueeze_*` patterns
    twice (a likely merge artifact); the duplicate group has been removed,
    so each pattern is excluded exactly once.
    """
    patterns = (
        # constantofshape
        r'test_constantofshape_float_ones_cpu',
        r'test_constantofshape_int_shape_zero_cpu',
        r'test_constantofshape_int_zeros_cpu',
        # cumsum
        r'test_cumsum_1d_cpu',
        r'test_cumsum_1d_exclusive_cpu',
        r'test_cumsum_1d_reverse_cpu',
        r'test_cumsum_1d_reverse_exclusive_cpu',
        r'test_cumsum_2d_axis_0_cpu',
        r'test_cumsum_2d_axis_1_cpu',
        r'test_cumsum_2d_negative_axis_cpu',
        # expand
        r'test_expand_dim_changed_cpu',
        r'test_expand_dim_unchanged_cpu',
        r'test_expand_shape_model1_cpu',
        r'test_expand_shape_model2_cpu',
        r'test_expand_shape_model3_cpu',
        r'test_expand_shape_model4_cpu',
        # onehot
        r'test_onehot_negative_indices_cpu',
        r'test_onehot_with_axis_cpu',
        r'test_onehot_with_negative_axis_cpu',
        r'test_onehot_without_axis_cpu',
        # range
        r'test_range_float_type_positive_delta_cpu',
        r'test_range_int32_type_negative_delta_cpu',
        # split
        r'test_split_variable_parts_1d_opset13_cpu',
        r'test_split_variable_parts_1d_opset18_cpu',
        r'test_split_variable_parts_2d_opset13_cpu',
        r'test_split_variable_parts_2d_opset18_cpu',
        r'test_split_variable_parts_default_axis_opset13_cpu',
        r'test_split_variable_parts_default_axis_opset18_cpu',
        r'test_split_zero_size_splits_opset13_cpu',
        r'test_split_zero_size_splits_opset18_cpu',
        # squeeze
        r'test_squeeze_cpu',
        r'test_squeeze_negative_axes_cpu',
        # unsqueeze
        r'test_unsqueeze_axis_0_cpu',
        r'test_unsqueeze_axis_1_cpu',
        r'test_unsqueeze_axis_2_cpu',
        r'test_unsqueeze_negative_axes_cpu',
        r'test_unsqueeze_three_axes_cpu',
        r'test_unsqueeze_two_axes_cpu',
        r'test_unsqueeze_unsorted_axes_cpu',
        # tile
        r'test_tile_cpu',
        r'test_tile_precomputed_cpu',
        # reshape
        r'test_reshape_allowzero_reordered_cpu',
        r'test_reshape_extended_dims_cpu',
        r'test_reshape_negative_dim_cpu',
        r'test_reshape_negative_extended_dims_cpu',
        r'test_reshape_one_dim_cpu',
        r'test_reshape_reduced_dims_cpu',
        r'test_reshape_reordered_all_dims_cpu',
        r'test_reshape_reordered_last_dims_cpu',
        r'test_reshape_zero_and_negative_dim_cpu',
        r'test_reshape_zero_dim_cpu',
        # reduce
        r'test_reduce_l1_default_axes_keepdims_example_expanded_cpu',
        r'test_reduce_l1_default_axes_keepdims_random_expanded_cpu',
        r'test_reduce_l1_do_not_keepdims_example_expanded_cpu',
        r'test_reduce_l1_do_not_keepdims_random_expanded_cpu',
        r'test_reduce_l1_keep_dims_example_expanded_cpu',
        r'test_reduce_l1_keep_dims_random_expanded_cpu',
        r'test_reduce_l1_negative_axes_keep_dims_example_expanded_cpu',
        r'test_reduce_l1_negative_axes_keep_dims_random_expanded_cpu',
        r'test_reduce_l2_default_axes_keepdims_example_expanded_cpu',
        r'test_reduce_l2_default_axes_keepdims_random_expanded_cpu',
        r'test_reduce_l2_do_not_keepdims_example_expanded_cpu',
        r'test_reduce_l2_do_not_keepdims_random_expanded_cpu',
        r'test_reduce_l2_keep_dims_example_expanded_cpu',
        r'test_reduce_l2_keep_dims_random_expanded_cpu',
        r'test_reduce_l2_negative_axes_keep_dims_example_expanded_cpu',
        r'test_reduce_l2_negative_axes_keep_dims_random_expanded_cpu',
        r'test_reduce_log_sum_asc_axes_cpu',
        r'test_reduce_log_sum_asc_axes_expanded_cpu',
        r'test_reduce_log_sum_default_cpu',
        r'test_reduce_log_sum_default_expanded_cpu',
        r'test_reduce_log_sum_desc_axes_cpu',
        r'test_reduce_log_sum_desc_axes_expanded_cpu',
        r'test_reduce_log_sum_exp_default_axes_keepdims_example_expanded_cpu',
        r'test_reduce_log_sum_exp_default_axes_keepdims_random_expanded_cpu',
        r'test_reduce_log_sum_exp_do_not_keepdims_example_expanded_cpu',
        r'test_reduce_log_sum_exp_do_not_keepdims_random_expanded_cpu',
        r'test_reduce_log_sum_exp_keepdims_example_expanded_cpu',
        r'test_reduce_log_sum_exp_keepdims_random_expanded_cpu',
        r'test_reduce_log_sum_exp_negative_axes_keepdims_example_expanded_cpu',
        r'test_reduce_log_sum_exp_negative_axes_keepdims_random_expanded_cpu',
        r'test_reduce_log_sum_negative_axes_cpu',
        r'test_reduce_log_sum_negative_axes_expanded_cpu',
        r'test_reduce_max_do_not_keepdims_example_cpu',
        r'test_reduce_max_do_not_keepdims_random_cpu',
        r'test_reduce_max_keepdims_example_cpu',
        r'test_reduce_max_keepdims_random_cpu',
        r'test_reduce_max_negative_axes_keepdims_example_cpu',
        r'test_reduce_max_negative_axes_keepdims_random_cpu',
        r'test_reduce_mean_default_axes_keepdims_example_cpu',
        r'test_reduce_mean_default_axes_keepdims_random_cpu',
        r'test_reduce_mean_do_not_keepdims_example_cpu',
        r'test_reduce_mean_do_not_keepdims_random_cpu',
        r'test_reduce_mean_keepdims_example_cpu',
        r'test_reduce_mean_keepdims_random_cpu',
        r'test_reduce_mean_negative_axes_keepdims_example_cpu',
        r'test_reduce_mean_negative_axes_keepdims_random_cpu',
        r'test_reduce_min_do_not_keepdims_example_cpu',
        r'test_reduce_min_do_not_keepdims_random_cpu',
        r'test_reduce_min_keepdims_example_cpu',
        r'test_reduce_min_keepdims_random_cpu',
        r'test_reduce_min_negative_axes_keepdims_example_cpu',
        r'test_reduce_min_negative_axes_keepdims_random_cpu',
        r'test_reduce_prod_do_not_keepdims_example_cpu',
        r'test_reduce_prod_do_not_keepdims_random_cpu',
        r'test_reduce_prod_keepdims_example_cpu',
        r'test_reduce_prod_keepdims_random_cpu',
        r'test_reduce_prod_negative_axes_keepdims_example_cpu',
        r'test_reduce_prod_negative_axes_keepdims_random_cpu',
        r'test_reduce_sum_default_axes_keepdims_example_cpu',
        r'test_reduce_sum_default_axes_keepdims_random_cpu',
        r'test_reduce_sum_do_not_keepdims_example_cpu',
        r'test_reduce_sum_do_not_keepdims_random_cpu',
        r'test_reduce_sum_empty_axes_input_noop_example_cpu',
        r'test_reduce_sum_empty_axes_input_noop_random_cpu',
        r'test_reduce_sum_keepdims_example_cpu',
        r'test_reduce_sum_keepdims_random_cpu',
        r'test_reduce_sum_negative_axes_keepdims_example_cpu',
        r'test_reduce_sum_negative_axes_keepdims_random_cpu',
    )
    for pattern in patterns:
        backend_test.exclude(pattern)
def disabled_tests_onnx_1_10_0(backend_test):
    """Exclude ONNX 1.10.0 backend tests that fail or error on this backend.

    Fix: this definition duplicated an earlier `disabled_tests_onnx_1_10_0`
    and shadowed it (Python keeps only the last `def` with a given name),
    which silently dropped the earlier exclusions. Both exclusion sets are
    merged here so the effective definition covers everything.
    """
    # unsupported shape attributes
    backend_test.exclude(r'test_shape_end_1_cpu')
    backend_test.exclude(r'test_shape_end_negative_1_cpu')
    backend_test.exclude(r'test_shape_start_1_cpu')
    backend_test.exclude(r'test_shape_start_1_end_2_cpu')
    backend_test.exclude(r'test_shape_start_1_end_negative_1_cpu')
    backend_test.exclude(r'test_shape_start_negative_1_cpu')
    # fails (merged from the earlier, shadowed definition)
    # from OnnxBackendNodeModelTest
    backend_test.exclude(r'test_bernoulli_double_expanded_cpu')
    backend_test.exclude(r'test_bernoulli_expanded_cpu')
    backend_test.exclude(r'test_bernoulli_seed_expanded_cpu')
    # errors (merged from the earlier, shadowed definition)
    # from OnnxBackendNodeModelTest
    backend_test.exclude(r'test_bernoulli_cpu')
    backend_test.exclude(r'test_bernoulli_double_cpu')
    backend_test.exclude(r'test_bernoulli_seed_cpu')
    backend_test.exclude(r'test_castlike_BFLOAT16_to_FLOAT_cpu')
    backend_test.exclude(r'test_castlike_BFLOAT16_to_FLOAT_expanded_cpu')
    backend_test.exclude(r'test_castlike_FLOAT_to_BFLOAT16_cpu')
    backend_test.exclude(r'test_castlike_FLOAT_to_BFLOAT16_expanded_cpu')
    backend_test.exclude(r'test_castlike_FLOAT_to_STRING_cpu')
    backend_test.exclude(r'test_castlike_FLOAT_to_STRING_expanded_cpu')
    backend_test.exclude(r'test_castlike_STRING_to_FLOAT_cpu')
    backend_test.exclude(r'test_castlike_STRING_to_FLOAT_expanded_cpu')
    backend_test.exclude(r'test_optional_get_element_cpu')
    backend_test.exclude(r'test_optional_get_element_sequence_cpu')
    backend_test.exclude(r'test_optional_has_element_cpu')
    backend_test.exclude(r'test_optional_has_element_empty_cpu')
def disabled_tests_onnx_1_12_0(backend_test):
    """Exclude ONNX 1.12.0 backend tests that fail or error on this backend.

    Fix: this definition duplicated an earlier `disabled_tests_onnx_1_12_0`
    and shadowed it (Python keeps only the last `def` with a given name),
    which silently dropped the earlier exclusions. Both exclusion sets are
    merged here so the effective definition covers everything.
    """
    backend_test.exclude(r'test_scatter_elements_with_duplicate_indices_cpu')
    backend_test.exclude(
        r'test_reduce_sum_square_default_axes_keepdims_example_expanded_cpu')
    backend_test.exclude(
        r'test_reduce_sum_square_default_axes_keepdims_random_expanded_cpu')
    backend_test.exclude(
        r'test_reduce_sum_square_do_not_keepdims_example_expanded_cpu')
    backend_test.exclude(
        r'test_reduce_sum_square_do_not_keepdims_random_expanded_cpu')
    backend_test.exclude(
        r'test_reduce_sum_square_keepdims_example_expanded_cpu')
    backend_test.exclude(
        r'test_reduce_sum_square_keepdims_random_expanded_cpu')
    backend_test.exclude(
        r'test_reduce_sum_square_negative_axes_keepdims_example_expanded_cpu')
    backend_test.exclude(
        r'test_reduce_sum_square_negative_axes_keepdims_random_expanded_cpu')
    # errors (merged from the earlier, shadowed definition)
    # from OnnxBackendNodeModelTest
    merged = (
        r'test_blackmanwindow_cpu',
        r'test_blackmanwindow_expanded_cpu',
        r'test_blackmanwindow_symmetric_cpu',
        r'test_blackmanwindow_symmetric_expanded_cpu',
        r'test_dft_axis_cpu',
        r'test_dft_cpu',
        r'test_dft_inverse_cpu',
        r'test_hammingwindow_cpu',
        r'test_hammingwindow_expanded_cpu',
        r'test_hammingwindow_symmetric_cpu',
        r'test_hammingwindow_symmetric_expanded_cpu',
        r'test_hannwindow_cpu',
        r'test_hannwindow_expanded_cpu',
        r'test_hannwindow_symmetric_cpu',
        r'test_hannwindow_symmetric_expanded_cpu',
        r'test_layer_normalization_2d_axis0_cpu',
        r'test_layer_normalization_2d_axis1_cpu',
        r'test_layer_normalization_2d_axis_negative_1_cpu',
        r'test_layer_normalization_2d_axis_negative_2_cpu',
        r'test_layer_normalization_3d_axis0_epsilon_cpu',
        r'test_layer_normalization_3d_axis1_epsilon_cpu',
        r'test_layer_normalization_3d_axis2_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_1_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_2_epsilon_cpu',
        r'test_layer_normalization_3d_axis_negative_3_epsilon_cpu',
        r'test_layer_normalization_4d_axis0_cpu',
        r'test_layer_normalization_4d_axis1_cpu',
        r'test_layer_normalization_4d_axis2_cpu',
        r'test_layer_normalization_4d_axis3_cpu',
        r'test_layer_normalization_4d_axis_negative_1_cpu',
        r'test_layer_normalization_4d_axis_negative_2_cpu',
        r'test_layer_normalization_4d_axis_negative_3_cpu',
        r'test_layer_normalization_4d_axis_negative_4_cpu',
        r'test_layer_normalization_default_axis_cpu',
        r'test_melweightmatrix_cpu',
        r'test_sequence_map_add_1_sequence_1_tensor_cpu',
        r'test_sequence_map_add_1_sequence_1_tensor_expanded_cpu',
        r'test_sequence_map_add_2_sequences_cpu',
        r'test_sequence_map_add_2_sequences_expanded_cpu',
        r'test_sequence_map_extract_shapes_cpu',
        r'test_sequence_map_extract_shapes_expanded_cpu',
        r'test_sequence_map_identity_1_sequence_1_tensor_cpu',
        r'test_sequence_map_identity_1_sequence_1_tensor_expanded_cpu',
        r'test_sequence_map_identity_1_sequence_cpu',
        r'test_sequence_map_identity_1_sequence_expanded_cpu',
        r'test_sequence_map_identity_2_sequences_cpu',
        r'test_sequence_map_identity_2_sequences_expanded_cpu',
        r'test_stft_cpu',
        r'test_stft_with_window_cpu',
    )
    for pattern in merged:
        backend_test.exclude(pattern)
def create_backend_test(testname=None, target_device=None):
......@@ -116,8 +1127,7 @@ def create_backend_test(testname=None, target_device=None):
if testname:
backend_test.include(testname + '.*')
else:
# Include all of the nodes that we support.
# Onnx native node tests
# Onnx Operator tests
backend_test.include(r'.*test_abs.*')
backend_test.include(r'.*test_acos.*')
backend_test.include(r'.*test_acosh.*')
......@@ -131,73 +1141,209 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_atanh.*')
backend_test.include(r'.*test_averagepool.*')
backend_test.include(r'.*test_AvgPool.*')
backend_test.include(r'.*test_BatchNorm.*eval.*')
backend_test.include(r'.*test_[bB]atch[nN]orm(?!.*training).*')
backend_test.include(r'.*test_bitshift.*')
backend_test.include(r'.*test_bitwise.*')
backend_test.include(r'.*test_ceil.*')
backend_test.include(r'.*test_celu.*')
backend_test.include(r'.*test_clip.*')
backend_test.include(r'.*test_cast_.*')
backend_test.include(r'.*test_col2im.*')
backend_test.include(r'.*test_compress.*')
backend_test.include(r'.*test_concat.*')
backend_test.include(r'.*test_constant.*')
backend_test.include(r'.*test_Conv[1-3]d*')
backend_test.include(r'.*test_constant_.*')
backend_test.include(r'.*test_Constant.*')
backend_test.include(r'.*test_constantofshape.*')
backend_test.include(r'.*test_(basic_)?conv_.*')
backend_test.include(r'.*test_Conv[1-3]d.*')
backend_test.include(r'.*test_convinteger.*')
backend_test.include(r'.*test_convtranspose.*')
backend_test.include(r'.*test_ConvTranspose[1-3]d.*')
backend_test.include(r'.*test_cos.*')
backend_test.include(r'.*test_cosh.*')
backend_test.include(r'.*test_cumsum.*')
backend_test.include(r'.*test_(basic_)?deform_conv.*')
backend_test.include(r'.*test_depthtospace.*')
backend_test.include(r'.*test_dequantizelinear')
backend_test.include(r'.*test_dequantizelinear.*')
backend_test.include(r'.*test_det.*')
backend_test.include(r'.*test_dft.*')
backend_test.include(r'.*test_div.*')
backend_test.include(r'.*test_dropout.*')
backend_test.include(r'.*test_ELU*')
backend_test.include(r'.*test_elu.*')
backend_test.include(r'.*test_einsum.*')
backend_test.include(r'.*test_equal.*')
backend_test.include(r'.*test_Embedding*')
backend_test.include(r'.*test_exp.*')
backend_test.include(r'.*test_Embedding.*')
backend_test.include(r'.*test_erf.*')
backend_test.include(r'.*test_exp_.*')
backend_test.include(r'.*test_expand.*')
backend_test.include(r'.*test_eyelike.*')
backend_test.include(r'.*test_flatten.*')
backend_test.include(r'.*test_floor.*')
backend_test.include(r'.*test_fmod.*')
backend_test.include(r'.*test_gru.*')
backend_test.include(r'.*test_gather.*')
backend_test.include(r'.*test_gemm.*')
backend_test.include(r'.*test_globalaveragepool.*')
backend_test.include(r'.*test_globallppool.*')
backend_test.include(r'.*test_globalmaxpool.*')
backend_test.include(r'.*test_greater.*')
backend_test.include(r'.*test_hardsigmoid.*')
backend_test.include(r'.*test_hardswish.*')
backend_test.include(r'.*test_gridsample.*')
backend_test.include(r'.*test_hardmax.*')
backend_test.include(r'.*test_identity.*')
backend_test.include(r'.*test_if.*')
backend_test.include(r'.*test_instancenorm.*')
backend_test.include(r'.*test_isinf.*')
backend_test.include(r'.*test_isnan.*')
backend_test.include(r'.*test_LeakyReLU*')
backend_test.include(r'.*test_leakyrelu.*')
backend_test.include(r'.*test_less.*')
backend_test.include(r'.*test_Linear.*')
backend_test.include(r'.*test_log.*')
backend_test.include(r'.*test_logsoftmax.*')
backend_test.include(r'.*test_LogSoftmax.*')
backend_test.include(r'.*test_log_softmax.*')
backend_test.include(r'.*test_lrn.*')
backend_test.include(r'.*test_lstm.*')
backend_test.include(r'.*test_log.*')
backend_test.include(r'.*test_loop.*')
backend_test.include(r'.*test_lpnorm.*')
backend_test.include(r'.*test_lppool.*')
backend_test.include(r'.*test_matmul.*')
backend_test.include(r'.*test_max.*')
backend_test.include(r'.*test_MaxPool[1-9]d.*')
backend_test.include(r'.*test_max_.*')
backend_test.include(r'.*test_maxpool.*')
backend_test.include(r'.*test_MaxPool[1-3]d.*')
backend_test.include(r'.*test_maxroipool.*')
backend_test.include(r'.*test_maxunpool.*')
backend_test.include(r'.*test_mean.*')
backend_test.include(r'.*test_melweightmatrix.*')
backend_test.include(r'.*test_min.*')
backend_test.include(r' .*test_mod.*')
backend_test.include(r'.*test_mod.*')
backend_test.include(r'.*test_mul.*')
backend_test.include(r'.*test_multinomial.*')
backend_test.include(r'.*test_Multinomial.*')
backend_test.include(r'.*test_[mM]ultinomial.*')
backend_test.include(r'.*test_neg.*')
backend_test.include(r'.*test_nonmaxsuppression.*')
backend_test.include(r'.*test_nonzero.*')
backend_test.include(r'.*test_not.*')
backend_test.include(r'.*test_onehot.*')
backend_test.include(r'.*optional_get_element.*')
backend_test.include(r'.*optional_has_element.*')
backend_test.include(r'.*test_or.*')
backend_test.include(r'.*test_(constant_|edge_|reflect_|wrap_)?pad.*')
backend_test.include(
r'.*test_(Constant|Reflection|Replication|Zero)+Pad2d.*')
backend_test.include(r'.*test_pow.*')
backend_test.include(r'.*test_qlinearconv.*')
backend_test.include(r'.*test_qlinearmatmul.*')
backend_test.include(r'.*test_quantizelinear.*')
backend_test.include(r'.*test_(simple_)?rnn.*')
backend_test.include(r'.*test_randomnormal.*')
backend_test.include(r'.*test_randomuniform.*')
backend_test.include(r'.*test_reciprocal.*')
backend_test.include(r'.*test_reduce_max.*')
backend_test.include(r'.*test_reduce_mean.*')
backend_test.include(r'.*test_reduce_min.*')
backend_test.include(r'.*test_reduce_prod.*')
backend_test.include(r'.*test_reduce_sum.*')
backend_test.include(r'.*test_reshape.*')
backend_test.include(r'.*test_resize.*')
backend_test.include(r'.*test_reversesequence.*')
backend_test.include(r'.*test_roialign.*')
backend_test.include(r'.*test_round.*')
backend_test.include(r'.*test_stft.*')
backend_test.include(r'.*test_scan.*')
backend_test.include(r'.*test_scatter.*')
backend_test.include(r'.*test_sequence_at.*')
backend_test.include(r'.*test_sequence_construct.*')
backend_test.include(r'.*test_sequence_empty.*')
backend_test.include(r'.*test_sequence_erase.*')
backend_test.include(r'.*test_sequence_insert.*')
backend_test.include(r'.*test_sequence_length.*')
backend_test.include(r'.*test_shape.*')
backend_test.include(r'.*test_[sS]igmoid.*')
backend_test.include(r'.*test_sign.*')
backend_test.include(r'.*test_sin_.*')
backend_test.include(r'.*test_sinh.*')
backend_test.include(r'.*test_size.*')
backend_test.include(r'.*test_slice.*')
backend_test.include(r'.*test_spacetodepth.*')
backend_test.include(r'.*test_split.*')
backend_test.include(r'.*test_split_to_sequence.*')
backend_test.include(r'.*test_sqrt.*')
backend_test.include(r'.*test_squeeze.*')
backend_test.include(r'.*test_squeeze.*')
backend_test.include(r'.*test_strnorm.*')
backend_test.include(r'.*test_sub.*')
backend_test.include(r'.*test_sum.*')
backend_test.include(r'.*test_tan_.*')
backend_test.include(r'.*test_[tT]anh.*')
backend_test.include(r'.*test_tfidfvectorizer.*')
backend_test.include(r'.*test_tile.*')
backend_test.include(r'.*test_top_k.*')
backend_test.include(r'.*test_transpose.*')
backend_test.include(r'.*test_tril.*')
backend_test.include(r'.*test_triu.*')
backend_test.include(r'.*test_unique.*')
backend_test.include(r'.*test_unsqueeze.*')
backend_test.include(r'.*test_upsample.*')
backend_test.include(r'.*test_where.*')
backend_test.include(r'.*test_xor.*')
# Onnx Function tests
backend_test.include(r'.*test_bernoulli.*')
backend_test.include(r'.*test_blackmanwindow.*')
backend_test.include(r'.*test_castlike.*')
backend_test.include(r'.*test_celu.*')
backend_test.include(r'.*test_center_crop_pad.*')
backend_test.include(r'.*test_clip.*')
backend_test.include(r'.*test_dynamicquantizelinear.*')
backend_test.include(r'.*test_elu.*')
backend_test.include(r'.*test_ELU.*')
backend_test.include(r'.*test_GLU.*')
backend_test.include(r'.*test_greater_equal.*')
backend_test.include(r'.*test_group_normalization.*')
backend_test.include(r'.*test_hammingwindow.*')
backend_test.include(r'.*test_hannwindow.*')
backend_test.include(r'.*test_hardsigmoid.*')
backend_test.include(r'.*test_hardswish.*')
backend_test.include(r'.*test_layer_normalization.*')
backend_test.include(r'.*test_LeakyReLU.*')
backend_test.include(r'.*test_leakyrelu.*')
backend_test.include(r'.*test_less.*')
backend_test.include(r'.*test_Linear.*')
backend_test.include(r'.*test_logsoftmax.*')
backend_test.include(r'.*test_log_softmax.*')
backend_test.include(r'.*test_LogSoftmax.*')
backend_test.include(r'.*test_mvn.*')
backend_test.include(r'.*test_mish.*')
backend_test.include(r'.*test_nllloss.*')
backend_test.include(r'.*test_PixelShuffle.*')
backend_test.include(r'.*test_PoissonNLLLLoss_no_reduce.*')
backend_test.include(r'.*test_prelu.*')
backend_test.include(r'.*test_PReLU.*')
backend_test.include(r'.*test_range.*')
backend_test.include(r'.*test_reduce_l1.*')
backend_test.include(r'.*test_reduce_l2.*')
backend_test.include(r'.*test_reduce_log.*')
backend_test.include(r'.*test_ReLU.*')
backend_test.include(r'.*test_relu.*')
backend_test.include(r'.*test_selu.*')
backend_test.include(r'.*test_SELU.*')
backend_test.include(r'.*test_sequence_map.*')
backend_test.include(r'.*test_shrink.*')
backend_test.include(r'.*test_[sS]oftmax.*')
backend_test.include(r'.*test_[sS]oftplus.*')
backend_test.include(r'.*test_[sS]oftsign.*')
backend_test.include(r'.*test_sce.*')
backend_test.include(r'.*test_thresholdedrelu.*')
# OnnxBackendPyTorchOperatorModelTest
backend_test.include(r'.*test_operator_add_broadcast.*')
backend_test.include(r'.*test_operator_addconstant.*')
backend_test.include(r'.*test_operator_addmm.*')
backend_test.include(r'.*test_operator_add_size1.*')
backend_test.include(r'.*test_operator_basic.*')
backend_test.include(r'.*test_operator_chunk.*')
backend_test.include(r'.*test_operator_clip.*')
backend_test.include(r'.*test_operator_concat2.*')
backend_test.include(r'.*test_operator_conv_.*')
backend_test.include(r'.*test_operator_convtranspose.*')
backend_test.include(r'.*test_operator_exp.*')
backend_test.include(r'.*test_operator_flatten.*')
backend_test.include(r'.*test_operator_index.*')
backend_test.include(r'.*test_operator_max_.*')
backend_test.include(r'.*test_operator_maxpool.*')
backend_test.include(r'.*test_operator_min.*')
backend_test.include(r'.*test_operator_mod.*')
backend_test.include(r'.*test_operator_mm.*')
backend_test.include(r'.*test_operator_non_float_params.*')
backend_test.include(r'.*test_operator_pad.*')
backend_test.include(r'.*test_operator_params.*')
backend_test.include(r'.*test_operator_permute2.*')
backend_test.include(r'.*test_operator_pow.*')
......@@ -205,55 +1351,19 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_operator_reduced_mean_keepdim.*')
backend_test.include(r'.*test_operator_reduced_sum_.*')
backend_test.include(r'.*test_operator_reduced_sum_keepdim.*')
backend_test.include(r'.*test_operator_repeat.*')
backend_test.include(r'.*test_operator_selu.*')
backend_test.include(r'.*test_operator_sqrt.*')
backend_test.include(r'.*test_operator_symbolic_override.*')
backend_test.include(r'.*test_operator_symbolic_override_nested.*')
backend_test.include(r'.*test_operator_view.*')
backend_test.include(r'.*test_or.*')
backend_test.include(r'.*test_pow.*')
backend_test.include(r'.*test_PoissonNLLLLoss_no_reduce*')
backend_test.include(r'.*test_quantizelinear')
backend_test.include(r'.*test_reciprocal.*')
backend_test.include(r'.*test_reduce.*')
backend_test.include(r'.*test_ReLU*')
backend_test.include(r'.*test_relu.*')
#backend_test.include(r'.*test_reversesequence.*')
backend_test.include(r'.*test_RoiAlign*')
backend_test.include(r'.*test_roialign.*')
backend_test.include(r'.*test_scatter.*')
backend_test.include(r'.*test_Scatter.*')
backend_test.include(r'.*test_selu.*')
backend_test.include(r'.*test_shape.*')
backend_test.include(r'.*test_Sigmoid*')
backend_test.include(r'.*test_sigmoid.*')
backend_test.include(r'.*test_sin.*')
backend_test.include(r'.*test_sinh.*')
backend_test.include(r'.*test_size.*')
backend_test.include(r'.*test_Softmax*')
backend_test.include(r'.*test_softmax.*')
backend_test.include(r'.*test_Softmin*')
backend_test.include(r'.*test_Softplus*')
backend_test.include(r'.*test_softplus.*')
backend_test.include(r'.*test_softsign.*')
backend_test.include(r'.*test_sqrt.*')
backend_test.include(r'.*test_squeeze_cuda')
backend_test.include(r'.*test_sub.*')
backend_test.include(r'.*test_sum.*')
backend_test.include(r'.*test_tan.*')
backend_test.include(r'.*test_Tanh*')
backend_test.include(r'.*test_tanh.*')
backend_test.include(r'.*test_thresholdedrelu.*')
backend_test.include(r'.*test_topk.*')
backend_test.include(r'.*test_Topk.*')
backend_test.include(r'.*test_transpose.*')
backend_test.include(r'.*test_unsqueeze.*')
backend_test.include(r'.*test_where*')
backend_test.include(r'.*test_where.*')
backend_test.include(r'.*test_xor.*')
backend_test.include(r'.*test_ZeroPad2d*')
# # Onnx native model tests
# OnnxBackendSimpleModelTest
backend_test.include(r'.*test_gradient_of.*')
backend_test.include(r'.*test_sequence_model.*')
backend_test.include(r'.*test_single_relu_model.*')
# OnnxBackendRealModelTest
backend_test.include(r'.*test_bvlc_alexnet.*')
backend_test.include(r'.*test_densenet121.*')
backend_test.include(r'.*test_inception_v1.*')
......@@ -264,76 +1374,58 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_vgg19.*')
backend_test.include(r'.*test_zfnet512.*')
# exclude unenabled ops get pulled in with wildcards
# test_constant_pad gets pulled in with the test_constant* wildcard. Explicitly disable padding tests for now.
# Operator MATMULINTEGER is not supported by TRT
backend_test.exclude(r'.*test_matmulinteger.*')
backend_test.exclude(r'.*test_maxunpool.*')
# Absolute diff failed because
# numpy compares the difference between actual and desired to atol + rtol * abs(desired)
# failed test cases
backend_test.exclude(
r'test_argmax_keepdims_example_select_last_index_cpu')
backend_test.exclude(
r'test_argmax_negative_axis_keepdims_example_select_last_index_cpu'
)
backend_test.exclude(
r'test_argmax_no_keepdims_example_select_last_index_cpu')
backend_test.exclude(
r'test_argmin_keepdims_example_select_last_index_cpu')
backend_test.exclude(
r'test_argmin_negative_axis_keepdims_example_select_last_index_cpu'
)
backend_test.exclude(
r'test_argmin_no_keepdims_example_select_last_index_cpu')
backend_test.exclude(r'test_lrn_cpu')
backend_test.exclude(r'test_lrn_default_cpu')
backend_test.exclude(r'test_maxpool_2d_dilations_cpu')
backend_test.exclude(r'test_MaxPool2d_stride_padding_dilation_cpu')
backend_test.exclude(r'test_MaxPool1d_stride_padding_dilation_cpu')
backend_test.exclude(
r'test_maxpool_with_argmax_2d_precomputed_pads_cpu')
backend_test.exclude(
r'test_maxpool_with_argmax_2d_precomputed_strides_cpu')
# error cases
backend_test.exclude(r'test_constant_pad_cpu')
backend_test.exclude(r'test_constantofshape_float_ones_cpu')
backend_test.exclude(r'test_constantofshape_int_shape_zero_cpu')
backend_test.exclude(r'test_constantofshape_int_zeros_cpu')
backend_test.exclude(r'test_expand_dim_changed_cpu')
backend_test.exclude(r'test_expand_dim_unchanged_cpu')
backend_test.exclude(r'test_expand_shape_model1_cpu')
backend_test.exclude(r'test_expand_shape_model2_cpu')
backend_test.exclude(r'test_expand_shape_model3_cpu')
backend_test.exclude(r'test_expand_shape_model4_cpu')
backend_test.exclude(r'test_identity_sequence_cpu')
backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
backend_test.exclude(r'test_negative_log_likelihood_loss_*')
# all reduce ops have dynamic axes inputs
backend_test.exclude(r'test_softmax_cross_entropy_*')
backend_test.exclude(r'test_Embedding_cpu')
# real model tests
# Skipped tests
# backend_test.include(r'.*test_adagrad.*')
# backend_test.include(r'.*test_adam.*')
# backend_test.include(r'.*test_ai_onnx_ml.*')
# backend_test.include(r'.*test_batchnorm_epsilon_training.*')
# backend_test.include(r'.*test_batchnorm_example_training.*')
# backend_test.include(r'.*test_momentum.*')
# backend_test.include(r'.*test_nesterov_momentum.*')
# backend_test.include(r'.*test_training_dropout.*')
# backend_test.include(r'.*test_Softmin.*')
# Exclude failing tests
# from OnnxBackendRealModelTest
backend_test.exclude(r'test_inception_v1_cpu')
backend_test.exclude(r'test_resnet50_cpu')
backend_test.exclude(r'test_squeezenet_cpu')
# PRelu OnnxBackendPyTorchConvertedModelTest has wrong dim for broadcasting
backend_test.exclude(r'[a-z,_]*PReLU_[0-9]d_multiparam[a-z,_]*')
# Remove when float8 is supported
disabled_tests_float8(backend_test)
# Remove when dynamic shapes are supported
disabled_tests_dynamic_shape(backend_test)
# additional cases disabled for a specific onnx version
if version.parse(onnx.__version__) <= version.parse("1.7.0"):
if version.parse(onnx.__version__) >= version.parse("1.7.0"):
disabled_tests_onnx_1_7_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.8.0"):
disabled_tests_onnx_1_8_1(backend_test)
disabled_tests_onnx_1_8_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.9.0"):
disabled_tests_onnx_1_9_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.10.0"):
disabled_tests_onnx_1_10_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.11.0"):
disabled_tests_onnx_1_11_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.12.0"):
disabled_tests_onnx_1_12_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.13.0"):
disabled_tests_onnx_1_13_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.14.0"):
disabled_tests_onnx_1_14_0(backend_test)
# import all test cases at global scope to make
# them visible to python.unittest.
......
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
......@@ -21,23 +21,9 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CLANG_FORMAT=/opt/rocm/llvm/bin/clang-format
SRC_DIR=$DIR/../src
PYTHON=python3
if type -p python3.6 > /dev/null ; then
PYTHON=python3.6
fi
if type -p python3.8 > /dev/null ; then
PYTHON=python3.8
fi
ls -1 $DIR/include/ | xargs -n 1 -P $(nproc) -I{} -t bash -c "$PYTHON $DIR/te.py $DIR/include/{} | $CLANG_FORMAT -style=file > $SRC_DIR/include/migraphx/{}"
# Generate one C-API artifact: feed the migraphx.py API spec plus the given
# template through api.py, format the result with clang-format, and write it
# to the destination file.
#   $1 - template/input file passed to api.py
#   $2 - output file receiving the formatted result
# All expansions are quoted so paths containing spaces do not word-split.
function api {
    "$PYTHON" "$DIR/api.py" "$SRC_DIR/api/migraphx.py" "$1" | "$CLANG_FORMAT" -style=file > "$2"
}
api $DIR/api/migraphx.h $SRC_DIR/api/include/migraphx/migraphx.h
echo "Finished generating header migraphx.h"
api $DIR/api/api.cpp $SRC_DIR/api/api.cpp
echo "Finished generating source api.cpp "
onnx==1.14.1
protobuf==3.20.2
numpy==1.21.6
packaging==23.0
pytest==6.0.1
\ No newline at end of file
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
numpy==1.21.6
\ No newline at end of file
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
......@@ -21,12 +21,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import sys
import migraphx
try:
import numpy as np
except:
sys.exit()
import numpy as np
def test_conv_relu():
......
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
......@@ -23,10 +23,55 @@
#####################################################################################
import migraphx
import ctypes
import os
import glob
def test_conv_relu():
hip = ctypes.cdll.LoadLibrary("libamdhip64.so")
# Full path of the library is needed to fix an issue on sles
# where the library is not loaded otherwise.
# We check for the presence of library at the following paths,
# in the order listed below:
#
# 1. 'rocm_path' environment variable
# 2. /opt/rocm
# 3. /opt/rocm-*
#
# If the library is not found at any of these paths, we fall back
# to the library path being detected automatically.
library = "libamdhip64.so"
# Environment variable containing path to rocm
rocm_path_env_var = "rocm_path"
# Check for rocm_path, default to /opt/rocm if it does not exist.
rocm_path_var = os.getenv(rocm_path_env_var, default="/opt/rocm")
# Join the paths to the library to get full path,
# e.g. /opt/rocm/lib/libamdhip64.so
library_file = os.path.join(rocm_path_var, "lib", library)
# Check if the library file exists at the specified path
if os.path.exists(library_file):
# Replace library name by full path to the library
library = library_file
else:
# Pattern match to look for path to different
# rocm versions: /opt/rocm-*
rocm_path_pattern = "/opt/rocm-*/lib/libamdhip64.so"
matching_libraries = glob.glob(rocm_path_pattern)
if matching_libraries:
# Replace library name by full path to the first
# library found.
library = matching_libraries[0]
# Loads library either by using the full path to the
# library, if it has been detected earlier,
# or, proceeds to load the library based on the name
# of the library.
hip = ctypes.cdll.LoadLibrary(library)
p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
print(p)
......
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
......@@ -21,11 +21,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import migraphx, sys
try:
import numpy as np
except:
sys.exit()
import migraphx
import numpy as np
def test_add_op():
......
......@@ -24,7 +24,6 @@
#include <iostream>
#include <vector>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/register_target.hpp>
......@@ -84,7 +83,7 @@ TEST_CASE(param_add)
auto hs = mm->add_instruction(migraphx::make_op("add"), hp1, hp2);
auto fs = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
hs);
if(add_return)
{
......@@ -1014,7 +1013,7 @@ TEST_CASE(target_copy)
std::vector<float> orig_result;
run_prog(p, ref_t, m, orig_result);
EXPECT(migraphx::verify::verify_range(ref_result, orig_result));
EXPECT(migraphx::verify::verify_rms_range(ref_result, orig_result));
}
}
......@@ -1078,7 +1077,10 @@ TEST_CASE(int8_quantization_dot)
std::vector<float> no_quant_result;
run_prog(p, ref_t, m, no_quant_result);
EXPECT(migraphx::verify::verify_range(quant_result, no_quant_result, 30000));
EXPECT(migraphx::verify::verify_range_with_tolerance(
quant_result,
migraphx::verify::expected{no_quant_result},
migraphx::verify::tolerance{0.003}));
}
}
......@@ -1123,7 +1125,7 @@ TEST_CASE(int8_quantization_conv)
std::vector<float> no_quant_result;
run_prog(p, ref_t, no_quant_result);
EXPECT(migraphx::verify::verify_range(quant_result, no_quant_result));
EXPECT(migraphx::verify::verify_rms_range(quant_result, no_quant_result));
}
}
......@@ -1275,7 +1277,7 @@ TEST_CASE(test_op_capture)
cap_res.visit([&](auto output) { cap_vec.assign(output.begin(), output.end()); });
res.visit([&](auto output) { vec.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify::verify_range(vec, cap_vec));
EXPECT(migraphx::verify::verify_rms_range(vec, cap_vec));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment