Commit e08b425f authored by charlie

Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into refactor_dynamic_compute

parents fbe13c96 5fa42993
@@ -394,6 +394,31 @@ TEST_CASE(batch_norm_flat_test)
EXPECT(p == prog);
}
TEST_CASE(batch_norm_rank_2_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", {migraphx::shape::float_type, {2, 5}});
auto scale = mm->add_parameter("scale", {migraphx::shape::float_type, {5}});
auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {5}});
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {5}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {5}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, bias});
auto prog = optimize_onnx("batch_norm_rank_2_test.onnx");
EXPECT(p == prog);
}
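// For reference, the expected graph built above is the elementwise
// decomposition of batch-norm inference, with add_common_op inserting the
// multibroadcasts needed to make the operand shapes match:
//
//   y = scale * (x - mean) / sqrt(variance + epsilon) + bias
//
// realized here as sub, add, pow(., 0.5), div, mul, add.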
TEST_CASE(batch_norm_1d_test)
{
migraphx::program p;
@@ -115,6 +115,43 @@ TEST_CASE(batch_norm_flat_test)
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_rank_2_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_rank_2_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape x_shape{migraphx::shape::float_type, {2, 5}};
migraphx::shape c_shape(migraphx::shape::float_type, {5});
std::vector<float> x_data = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
std::vector<float> scale_data(5, 1.);
std::vector<float> bias_data(5, 0.);
std::vector<float> mean_data = {1., 2., 1., 2., 1.};
std::vector<float> variance_data(5, 0.5);
migraphx::parameter_map params;
params["x"] = migraphx::argument(x_shape, x_data.data());
params["scale"] = migraphx::argument(c_shape, scale_data.data());
params["bias"] = migraphx::argument(c_shape, bias_data.data());
params["mean"] = migraphx::argument(c_shape, mean_data.data());
params["variance"] = migraphx::argument(c_shape, variance_data.data());
auto result = p.eval(params).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.,
0.,
2.8284243,
2.8284243,
5.65684859,
7.07106074,
7.07106074,
9.89948504,
9.89948504,
12.72790933};
EXPECT(migraphx::verify_range(result_vector, gold));
}
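// Spot check of the gold values above: with scale = 1, bias = 0 and
// variance = 0.5 (and assuming the same epsilon of 1e-6 as the parser test),
// the third output is (3 - 1) / sqrt(0.5 + 1e-6) ~= 2.8284243, matching
// gold[2].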
TEST_CASE(batch_norm_1d_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_1d_test.onnx");
@@ -81,16 +81,6 @@ void throws_shape(const migraphx::shape&, Ts...)
"An expected shape should not be passed to throws_shape function");
}
TEST_CASE(batch_norm_inference_shape)
{
const size_t channels = 3;
migraphx::shape s{migraphx::shape::float_type, {4, channels, 3, 3}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
expect_shape(s, migraphx::make_op("batch_norm_inference"), s, vars, vars, vars, vars);
throws_shape(migraphx::make_op("batch_norm_inference"), s);
throws_shape(migraphx::make_op("batch_norm_inference"), s, vars, vars, vars, vars, vars);
}
TEST_CASE(broadcast)
{
{
@@ -28,7 +28,6 @@
#include <limits>
#include <migraphx/literal.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/ref/target.hpp>
@@ -650,202 +649,6 @@ TEST_CASE(avgpool_test)
}
}
TEST_CASE(batch_norm_1d_per_actv_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape x_shape{migraphx::shape::float_type, {2, 2, 4}};
migraphx::shape c_shape(migraphx::shape::float_type, {2, 4});
std::vector<float> x_data = {0.3547,
0.477,
-1.8575,
0.663,
-0.1881,
-0.5113,
-0.1803,
-0.5915,
-0.1552,
0.9821,
1.827,
0.0558,
-0.0417,
-1.0693,
1.9948,
-0.7448};
std::vector<float> scale_data = {
-0.3181, -0.3885, 1.655, 0.0704, -0.2565, -1.1761, -0.3751, 0.1057};
std::vector<float> bias_data = {
-1.2118, -2.1156, 0.0046, -0.1341, -0.2724, -1.0718, 0.5535, -0.889};
std::vector<float> mean_data = {
0.0997, 0.7295, -0.0153, 0.3594, -0.1149, -0.7903, 0.9073, -0.6681};
std::vector<float> variance_data = {
0.13, 0.1276, 6.7878, 0.1843, 0.0107, 0.1556, 2.3655, 0.0117};
auto x = mm->add_literal(migraphx::literal{x_shape, x_data});
auto scale = mm->add_literal(migraphx::literal{c_shape, scale_data});
auto bias = mm->add_literal(migraphx::literal{c_shape, bias_data});
auto mean = mm->add_literal(migraphx::literal{c_shape, mean_data});
auto variance = mm->add_literal(migraphx::literal{c_shape, variance_data});
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1e-6},
{"momentum", 0.9},
{"bn_mode", migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
x,
scale,
bias,
mean,
variance);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-1.43677,
-1.84098,
-1.16563,
-0.0843136,
-0.090896,
-1.90364,
0.81875,
-0.81415,
-0.986915,
-2.39032,
1.17489,
-0.183886,
-0.453904,
-0.239955,
0.288275,
-0.963948};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_1d_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape x_shape{migraphx::shape::float_type, {2, 3, 4}};
migraphx::shape c_shape(migraphx::shape::float_type, {3});
std::vector<float> x_data = {0.7253, -0.6356, 0.4606, -0.8689, -1.1932, 0.4538,
-1.0018, -0.365, -0.214, -0.9553, -0.7672, 0.2331,
-0.8416, -0.6142, 0.0814, 0.2498, -0.6706, 1.4872,
0.5112, -1.5212, -0.9126, 0.0735, 1.085, -0.3417};
std::vector<float> scale_data = {1.1, 1.2, 1.3};
std::vector<float> bias_data = {0.1, 0.2, 0.3};
std::vector<float> mean_data = {-0.1804, -0.2875, -0.2249};
std::vector<float> variance_data = {2.7914, 7.3424, 3.3287};
auto x = mm->add_literal(migraphx::literal{x_shape, x_data});
auto scale = mm->add_literal(migraphx::literal{c_shape, scale_data});
auto bias = mm->add_literal(migraphx::literal{c_shape, bias_data});
auto mean = mm->add_literal(migraphx::literal{c_shape, mean_data});
auto variance = mm->add_literal(migraphx::literal{c_shape, variance_data});
mm->add_instruction(migraphx::make_op("batch_norm_inference", {{"epsilon", 1e-5}}),
x,
scale,
bias,
mean,
variance);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.696301, -0.199697, 0.522026, -0.353299, -0.201094, 0.528289,
-0.116332, 0.165679, 0.307767, -0.220435, -0.086407, 0.62634,
-0.335325, -0.185608, 0.272366, 0.383238, 0.0303421, 0.985936,
0.553709, -0.346351, -0.190009, 0.51262, 1.23335, 0.216776};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_3d_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape x_shape{migraphx::shape::float_type, {2, 2, 2, 2, 2}};
migraphx::shape c_shape(migraphx::shape::float_type, {2});
std::vector<float> x_data = {-1.0833, 1.9681, 1.2075, -0.723, -0.4076, -0.8738, 0.5853,
-0.5357, 1.734, 0.7904, 0.6953, -0.468, -0.425, 0.6895,
0.0096, 0.4205, -0.1749, 1.2821, 2.1453, -0.8538, 1.0687,
0.0906, 0.0714, -1.3079, -0.6376, 1.3023, 0.945, 0.0927,
-0.7421, -1.4341, -1.0309, 1.5153};
std::vector<float> scale_data = {1.1, 1.3};
std::vector<float> bias_data = {0.1, 0.2};
std::vector<float> mean_data = {0.1537, 0.2161};
std::vector<float> variance_data = {18.0805, 13.3906};
auto x = mm->add_literal(migraphx::literal{x_shape, x_data});
auto scale = mm->add_literal(migraphx::literal{c_shape, scale_data});
auto bias = mm->add_literal(migraphx::literal{c_shape, bias_data});
auto mean = mm->add_literal(migraphx::literal{c_shape, mean_data});
auto variance = mm->add_literal(migraphx::literal{c_shape, variance_data});
mm->add_instruction(migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {
-0.220005, 0.569376, 0.372612, -0.126798, -0.0452053, -0.165809, 0.211653, -0.0783441,
0.739245, 0.404024, 0.370239, -0.0430317, -0.0277556, 0.368179, 0.126639, 0.272615,
0.0149929, 0.391911, 0.615216, -0.160635, 0.336706, 0.0836764, 0.0787094, -0.278108,
-0.103283, 0.585881, 0.458947, 0.156161, -0.140408, -0.386246, -0.243006, 0.661551};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_inference_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
const size_t width = 2;
const size_t height = 2;
const size_t channels = 4;
const size_t batches = 2;
const float x_val = 8.0;
const float mean_val = 2.0;
const float variance_val = 4.0;
const float scale_val = 2.0f;
const float bias_val = 1.0f;
const float output_val = scale_val * (x_val - mean_val) / (std::sqrt(variance_val)) + bias_val;
migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
std::vector<float> x_data(width * height * channels * batches);
std::vector<float> scale_data(channels);
std::vector<float> bias_data(channels);
std::vector<float> mean_data(channels);
std::vector<float> variance_data(channels);
std::fill(x_data.begin(), x_data.end(), x_val);
std::fill(mean_data.begin(), mean_data.end(), mean_val);
std::fill(variance_data.begin(), variance_data.end(), variance_val);
std::fill(scale_data.begin(), scale_data.end(), scale_val);
std::fill(bias_data.begin(), bias_data.end(), bias_val);
auto x = mm->add_literal(migraphx::literal{s, x_data});
auto scale = mm->add_literal(migraphx::literal{vars, scale_data});
auto bias = mm->add_literal(migraphx::literal{vars, bias_data});
auto mean = mm->add_literal(migraphx::literal{vars, mean_data});
auto variance = mm->add_literal(migraphx::literal{vars, variance_data});
mm->add_instruction(migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> result_vector(width * height * channels * batches);
std::vector<float> gold(width * height * channels * batches);
std::fill(gold.begin(), gold.end(), output_val);
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(broadcast_test)
{
migraphx::program p;
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/rewrite_batchnorm.hpp>
#include <migraphx/program.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/ranges.hpp>
#include <test.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/verify.hpp>
bool is_batch_norm(migraphx::instruction& ins) { return ins.name() == "batch_norm_inference"; }
TEST_CASE(fwd_conv_batchnorm_rewrite_test)
{
std::vector<float> xdata = {
0.26485917, 0.61703885, 0.32762103, 0.2503367, 0.6552712, 0.07947932, 0.95442678,
0.70892651, 0.890563, 0.80808088, 0.89540492, 0.52657048, 0.94614791, 0.64371508,
0.0971229, 0.2475562, 0.47405955, 0.85538928, 0.05428386, 0.993078, 0.72771973,
0.18312255, 0.3091522, 0.51396558, 0.35158192, 0.2419852, 0.83691474, 0.36355352,
0.04769134, 0.08312604, 0.61804092, 0.0508887, 0.30987137, 0.81307629, 0.16398955,
0.69886166, 0.02415926, 0.60608918, 0.81907569, 0.13208211, 0.48303735, 0.87533734,
0.92998813, 0.65553674, 0.73223327, 0.99401001, 0.09850688, 0.76972609, 0.11118327,
0.04392097, 0.39252306, 0.91129653, 0.89078693, 0.60571206, 0.98410397, 0.15290698,
0.86992609, 0.7575111, 0.80583525, 0.23649562, 0.7478029, 0.62888878, 0.39886601,
0.37066793, 0.72627947, 0.8745595, 0.13568234, 0.7413787, 0.5039495, 0.18945697,
0.87046838, 0.63970494, 0.01124038, 0.27459063, 0.65745586, 0.69182619, 0.80470603,
0.58039348, 0.36950583, 0.43634225, 0.01694425, 0.14099377, 0.77015849, 0.35809292,
0.40547674, 0.46538817, 0.65835358, 0.2266954, 0.39057646, 0.64642207, 0.84491134,
0.20998067, 0.41074121, 0.73055221, 0.26424874, 0.10612507, 0.24478521, 0.24091282,
0.52536754, 0.57292341, 0.82190903, 0.51858515, 0.17162996, 0.52048114, 0.96624787,
0.17527163, 0.56384485, 0.91991603};
std::vector<float> wdata = {
-1.12125056, 0.50228441, 1.12719446, -2.61705068, -0.2027315, -0.82199441, 0.05337102,
-0.62146691, -2.40572931, -1.47175612, 1.49654601, -1.07070376, -0.65908074, -0.28457694,
1.60046717, 0.20677642, -1.51844486, 0.41203847, -0.01285751, 0.07948031, -0.91507006,
-1.59481079, -0.12856238, 0.39970482, -1.89015158, 0.66969754, 0.10312618};
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 6, 6}};
migraphx::shape ws{migraphx::shape::float_type, {1, 3, 3, 3}};
migraphx::shape vars{migraphx::shape::float_type, {1}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_literal(xs, xdata);
auto w = mm->add_literal(ws, wdata);
auto conv = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
x,
w);
auto scale = mm->add_literal(migraphx::literal{vars, {3.0f}});
auto bias = mm->add_literal(migraphx::literal{vars, {8.1f}});
auto mean = mm->add_literal(migraphx::literal{vars, {4.0f}});
auto variance = mm->add_literal(migraphx::literal{vars, {37.11f}});
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
std::vector<float> results_vector1;
std::vector<float> results_vector2;
result1.visit([&](auto output) { results_vector1.assign(output.begin(), output.end()); });
result2.visit([&](auto output) { results_vector2.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector1, results_vector2));
}
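// A sketch of the algebra rewrite_batchnorm relies on: batch-norm inference is
// an affine map per channel, y = a * x + b with
//
//   a = scale / sqrt(variance + epsilon),  b = bias - a * mean
//
// so when the statistics are literals the op reduces to a broadcast mul/add
// that constant propagation can fold after the convolution, which is why p1
// and p2 must produce the same values.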
TEST_CASE(non_literal)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 8}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", xs);
auto w = mm->add_parameter("w", ws);
auto conv = mm->add_instruction(migraphx::make_op("convolution"), x, w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
}
TEST_CASE(as_literal)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 8}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_literal(migraphx::generate_literal(xs, 1));
auto w = mm->add_literal(migraphx::generate_literal(ws, 1));
auto conv = mm->add_instruction(migraphx::make_op("convolution"), x, w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
TEST_CASE(as_literal_1d)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_literal(migraphx::generate_literal(xs, 1));
auto w = mm->add_literal(migraphx::generate_literal(ws, 1));
auto conv = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0}}, {"stride", {1}}, {"dilation", {1}}}),
x,
w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
TEST_CASE(as_literal_3d)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 2, 4, 8}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::op::convolution conv_op;
conv_op.padding = {0, 0, 0};
conv_op.stride = {1, 1, 1};
conv_op.dilation = {1, 1, 1};
auto x = mm->add_literal(migraphx::generate_literal(xs, 1));
auto w = mm->add_literal(migraphx::generate_literal(ws, 1));
auto conv = mm->add_instruction(conv_op, x, w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
TEST_CASE(literal_reshape)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 8}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_literal(migraphx::generate_literal(xs, 1));
auto w = mm->add_literal(migraphx::generate_literal(ws, 1));
auto conv = mm->add_instruction(migraphx::make_op("convolution"), x, w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
TEST_CASE(literal_reshape_per_actv)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 7, 4}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4, 8, 7, 4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_literal(migraphx::generate_literal(xs, 1));
auto w = mm->add_literal(migraphx::generate_literal(ws, 1));
auto conv = mm->add_instruction(
migraphx::make_op(
"convolution",
{{"padding", {0, 0, 0}}, {"stride", {1, 1, 1}}, {"dilation", {1, 1, 1}}}),
x,
w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1.0e-5},
{"momentum", 0.88},
{"bn_mode",
migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
conv,
scale,
bias,
mean,
variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -120,19 +120,45 @@ def batchnorm_test(g1):
with g1.as_default():
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 16, 16, 32),
name='0')
g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
g1_offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='2')
g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='3')
name='x')
g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
g1_offset = tf.compat.v1.placeholder(tf.float32,
shape=(32),
name='bias')
g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
g1_variance = tf.compat.v1.placeholder(tf.float32,
shape=(32),
name='4')
name='variance')
tf.compat.v1.nn.fused_batch_norm(x=g1_input,
scale=g1_scale,
offset=g1_offset,
mean=g1_mean,
variance=g1_variance,
epsilon=0.00001,
epsilon=1e-4,
is_training=False,
name='batchnorm1')
@tf_test
def batchnorm_half_test(g1):
with g1.as_default():
g1_input = tf.compat.v1.placeholder(tf.float16,
shape=(1, 16, 16, 32),
name='x')
g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
g1_offset = tf.compat.v1.placeholder(tf.float32,
shape=(32),
name='bias')
g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
g1_variance = tf.compat.v1.placeholder(tf.float32,
shape=(32),
name='variance')
tf.compat.v1.nn.fused_batch_norm(x=g1_input,
scale=g1_scale,
offset=g1_offset,
mean=g1_mean,
variance=g1_variance,
epsilon=1e-4,
is_training=False,
name='batchnorm1')
@@ -142,19 +168,21 @@ def batchnormv3_test(g1):
with g1.as_default():
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 16, 16, 32),
name='0')
g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
g1_offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='2')
g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='3')
name='x')
g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
g1_offset = tf.compat.v1.placeholder(tf.float32,
shape=(32),
name='bias')
g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
g1_variance = tf.compat.v1.placeholder(tf.float32,
shape=(32),
name='4')
name='variance')
tf.raw_ops.FusedBatchNormV3(x=g1_input,
scale=g1_scale,
offset=g1_offset,
mean=g1_mean,
variance=g1_variance,
epsilon=0.00001,
epsilon=1e-6,
is_training=False,
name='batchnorm1')
@@ -24,6 +24,7 @@
#include <iostream>
#include <vector>
#include <unordered_map>
#include <migraphx/common.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/simplify_reshapes.hpp>
@@ -33,7 +34,6 @@
#include <migraphx/instruction.hpp>
#include <migraphx/tf.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/reduce_mean.hpp>
#include <migraphx/op/pooling.hpp>
@@ -186,50 +186,94 @@ TEST_CASE(batchmatmul_test)
TEST_CASE(batchnorm_test)
{
float epsilon = 1.001e-5f;
float momentum = 0.9f;
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::op::batch_norm_inference op{
epsilon, momentum, migraphx::op::batch_norm_inference::spatial};
migraphx::shape s0{migraphx::shape::float_type, {32}};
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 32, 16, 16}});
std::vector<float> const_vals(32);
std::fill(const_vals.begin(), const_vals.end(), 1.0f);
auto l2 = mm->add_parameter("2", s0);
auto l3 = mm->add_parameter("3", s0);
auto l4 = mm->add_parameter("4", s0);
auto l1 = mm->add_literal(migraphx::literal{s0, const_vals});
mm->add_instruction(op, l0, l1, l2, l3, l4);
auto x = mm->add_parameter("x", {migraphx::shape::float_type, {1, 32, 16, 16}});
auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {32}});
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {32}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {32}});
std::vector<float> scale_data(32, 1.0);
auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-4f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_tf("batchnorm_test.pb", true);
EXPECT(p == prog);
}
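// Note on the unsqueeze steps above: "unsqueeze" with axes {1, 2} reshapes
// each per-channel {32} tensor to {32, 1, 1}, which add_common_op then
// multibroadcasts against the {1, 32, 16, 16} NCHW activation so that each
// channel's scale/bias/mean/variance applies across the spatial dimensions.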
TEST_CASE(batchnorm_half_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", {migraphx::shape::half_type, {1, 32, 16, 16}});
auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {32}});
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {32}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {32}});
std::vector<float> scale_data(32, 1.0);
auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-4f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_tf("batchnorm_half_test.pb", true);
EXPECT(p == prog);
}
TEST_CASE(batchnormv3_test)
{
float epsilon = 1.0e-5f;
float momentum = 0.9f;
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::op::batch_norm_inference op{
epsilon, momentum, migraphx::op::batch_norm_inference::spatial};
migraphx::shape s0{migraphx::shape::float_type, {32}};
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 32, 16, 16}});
std::vector<float> const_vals(32);
std::fill(const_vals.begin(), const_vals.end(), 1.0f);
auto l2 = mm->add_parameter("2", s0);
auto l3 = mm->add_parameter("3", s0);
auto l4 = mm->add_parameter("4", s0);
auto l1 = mm->add_literal(migraphx::literal{s0, const_vals});
mm->add_instruction(op, l0, l1, l2, l3, l4);
auto prog = optimize_tf("batchnormv3_test.pb", true);
auto x = mm->add_parameter("x", {migraphx::shape::float_type, {1, 32, 16, 16}});
auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {32}});
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {32}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {32}});
std::vector<float> scale_data(32, 1.0);
auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_tf("batchnormv3_test.pb", true);
EXPECT(p == prog);
}
@@ -27,26 +27,21 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_1d : verify_program<test_batchnorm_1d>
struct quant_conv_1d : verify_program<quant_conv_1d>
{
const size_t size = 3;
const size_t channels = 3;
const size_t batches = 4;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, size}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
migraphx::shape a_shape{migraphx::shape::int8_type, {2, 3, 4}};
auto pa = mm->add_parameter("a", a_shape);
migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3}};
auto pc = mm->add_parameter("c", c_shape);
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
migraphx::make_op("quant_convolution",
{{"padding", {0}}, {"stride", {1}}, {"dilation", {1}}}),
pa,
pc);
return p;
}
};
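// quant_conv_1d replaces the old batch-norm verify test with the 1-D int8
// path: padding 0 and stride 1 over a {2, 3, 4} input and {2, 3, 3} weights
// should yield a {2, 2, 2} output, accumulated in int32 by quant_convolution.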
/* MIT License header (identical to the file header above) */
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
struct test_batchnorm_1d_per_actv : verify_program<test_batchnorm_1d_per_actv>
{
const size_t d1 = 5;
const size_t channels = 2;
const size_t batches = 3;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1}};
migraphx::shape vars{migraphx::shape::float_type, {channels, d1}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1.0e-5},
{"momentum", 0.96f},
{"bn_mode",
migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
x,
scale,
bias,
mean,
variance);
return p;
}
};
/* MIT License header (identical to the file header above) */
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
struct test_batchnorm_2d_per_actv : verify_program<test_batchnorm_2d_per_actv>
{
const size_t d1 = 2;
const size_t d2 = 4;
const size_t channels = 2;
const size_t batches = 3;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2}};
migraphx::shape vars{migraphx::shape::float_type, {channels, d1, d2}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1.0e-6},
{"momentum", 0.9f},
{"bn_mode",
migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
x,
scale,
bias,
mean,
variance);
return p;
}
};
/* MIT License header (identical to the file header above) */
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_3d : verify_program<test_batchnorm_3d>
{
const size_t d1 = 2;
const size_t d2 = 2;
const size_t d3 = 2;
const size_t channels = 2;
const size_t batches = 2;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2, d3}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
return p;
}
};
/* MIT License header (identical to the file header above) */
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
struct test_batchnorm_3d_per_actv : verify_program<test_batchnorm_3d_per_actv>
{
const size_t d1 = 2;
const size_t d2 = 4;
const size_t d3 = 5;
const size_t channels = 2;
const size_t batches = 3;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2, d3}};
migraphx::shape vars{migraphx::shape::float_type, {channels, d1, d2, d3}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1.0e-6},
{"momentum", 0.8f},
{"bn_mode",
migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
x,
scale,
bias,
mean,
variance);
return p;
}
};
/* MIT License header (identical to the file header above) */
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_inference : verify_program<test_batchnorm_inference>
{
const size_t width = 3;
const size_t height = 3;
const size_t channels = 3;
const size_t batches = 4;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
return p;
}
};
/* MIT License header (identical to the file header above) */
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_inference_2 : verify_program<test_batchnorm_inference_2>
{
const size_t width = 14;
const size_t height = 14;
const size_t channels = 256;
const size_t batches = 1;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
return p;
}
};
@@ -26,6 +26,8 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn : verify_program<test_conv_bn>
{
@@ -37,19 +39,38 @@ struct test_conv_bn : verify_program<test_conv_bn>
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 224, 224}};
migraphx::shape ws{migraphx::shape::float_type, {64, 3, 7, 7}};
migraphx::shape vars{migraphx::shape::float_type, {64}};
auto x = mm->add_parameter("x", xs);
auto w = mm->add_parameter("w", ws);
auto x = mm->add_parameter("x", xs);
auto w = mm->add_parameter("w", ws);
// non-symmetrical tiling
auto conv = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {3, 3}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
x,
w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
auto usq_scale =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {conv, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
return p;
}
};
@@ -26,21 +26,38 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn_add : verify_program<test_conv_bn_add>
{
static migraphx::instruction_ref add_bn(migraphx::module& m,
migraphx::instruction_ref x,
std::size_t channels,
std::size_t seed = 1)
static migraphx::instruction_ref add_bn(migraphx::module& m, migraphx::instruction_ref x)
{
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto scale = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + seed)));
auto bias = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + seed)));
auto mean = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + seed)));
auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + seed)));
return m.add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
auto bn_lens = x->get_shape().lens();
auto c_len = bn_lens.at(1);
migraphx::shape vars{migraphx::shape::float_type, {c_len}};
auto scale = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + c_len)));
auto bias = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + c_len)));
auto mean = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + c_len)));
auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + c_len)));
auto rt = m.add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = m.add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
auto usq_scale =
m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var =
m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
auto numer = add_common_op(m, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(m, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(m, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(m, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(m, migraphx::make_op("mul"), {div0, usq_scale});
return add_common_op(m, migraphx::make_op("add"), {r0, usq_bias});
}
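// The refactored add_bn reads the channel count from
// x->get_shape().lens().at(1) rather than taking channels/seed parameters, so
// the helper stays consistent with whatever convolution output it is given.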
migraphx::program create_program() const
@@ -57,10 +74,10 @@ struct test_conv_bn_add : verify_program<test_conv_bn_add>
{migraphx::shape::float_type, {ochannels, ichannels, 1, 1}}, 2));
auto relu1 = mm->add_instruction(migraphx::make_op("relu"), x);
auto conv1 = mm->add_instruction(migraphx::make_op("convolution"), relu1, w);
auto bn1 = add_bn(*mm, conv1, ochannels, 1);
auto bn1 = add_bn(*mm, conv1);
auto relu2 = mm->add_instruction(migraphx::make_op("relu"), y);
auto conv2 = mm->add_instruction(migraphx::make_op("convolution"), relu2, v);
auto bn2 = add_bn(*mm, conv2, ochannels, 1);
auto bn2 = add_bn(*mm, conv2);
auto sum = mm->add_instruction(migraphx::make_op("add"), bn1, bn2);
mm->add_instruction(migraphx::make_op("relu"), sum);
return p;
@@ -27,6 +27,8 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn_relu_pooling : verify_program<test_conv_bn_relu_pooling>
{
@@ -49,8 +51,26 @@ struct test_conv_bn_relu_pooling : verify_program<test_conv_bn_relu_pooling>
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
auto bn = mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
auto usq_scale =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {conv, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto bn = add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto relu = mm->add_instruction(migraphx::make_op("relu"), bn);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},