"...nlp/git@developer.sourcefind.cn:gaoqiong/migraphx.git" did not exist on "faefeef937b74061dc18d527739c76e8f9901950"
Commit 9db8a28d authored by Paul

Merge

parents 1f8aa24f 4b1c1c41
@@ -115,6 +115,43 @@ TEST_CASE(batch_norm_flat_test)
EXPECT(migraphx::verify_range(result_vector, gold));
}
+TEST_CASE(batch_norm_rank_2_test)
+{
+migraphx::program p = migraphx::parse_onnx("batch_norm_rank_2_test.onnx");
+p.compile(migraphx::ref::target{});
+migraphx::shape x_shape{migraphx::shape::float_type, {2, 5}};
+migraphx::shape c_shape(migraphx::shape::float_type, {5});
+std::vector<float> x_data = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
+std::vector<float> scale_data(5, 1.);
+std::vector<float> bias_data(5, 0.);
+std::vector<float> mean_data = {1., 2., 1., 2., 1.};
+std::vector<float> variance_data(5, 0.5);
+migraphx::parameter_map params;
+params["x"] = migraphx::argument(x_shape, x_data.data());
+params["scale"] = migraphx::argument(c_shape, scale_data.data());
+params["bias"] = migraphx::argument(c_shape, bias_data.data());
+params["mean"] = migraphx::argument(c_shape, mean_data.data());
+params["variance"] = migraphx::argument(c_shape, variance_data.data());
+auto result = p.eval(params).back();
+std::vector<float> result_vector;
+result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
+std::vector<float> gold = {0.,
+0.,
+2.8284243,
+2.8284243,
+5.65684859,
+7.07106074,
+7.07106074,
+9.89948504,
+9.89948504,
+12.72790933};
+EXPECT(migraphx::verify_range(result_vector, gold));
+}
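Note: the gold values follow from the standard BatchNormalization formula y = (x - mean) / sqrt(variance + epsilon) * scale + bias, assuming the generated .onnx file uses the ONNX default epsilon of 1e-5 (the file itself is not shown here). With scale = 1, bias = 0, and variance = 0.5 throughout, the third element is (3 - 1) / sqrt(0.5 + 1e-5) = 2.8284243, matching the gold vector.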
TEST_CASE(batch_norm_1d_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_1d_test.onnx");
......
@@ -81,16 +81,6 @@ void throws_shape(const migraphx::shape&, Ts...)
"An expected shape should not be passed to throws_shape function");
}
-TEST_CASE(batch_norm_inference_shape)
-{
-const size_t channels = 3;
-migraphx::shape s{migraphx::shape::float_type, {4, channels, 3, 3}};
-migraphx::shape vars{migraphx::shape::float_type, {channels}};
-expect_shape(s, migraphx::make_op("batch_norm_inference"), s, vars, vars, vars, vars);
-throws_shape(migraphx::make_op("batch_norm_inference"), s);
-throws_shape(migraphx::make_op("batch_norm_inference"), s, vars, vars, vars, vars, vars);
-}
TEST_CASE(broadcast)
{
{
@@ -261,8 +251,7 @@ TEST_CASE(convolution_shape)
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
{"use_dynamic_same_auto_pad", true}}),
{"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
input_dyn_shape,
weights_shape);
@@ -275,8 +264,7 @@ TEST_CASE(convolution_shape)
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
{"use_dynamic_same_auto_pad", true}}),
{"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
input_dyn_shape,
weights_shape);
@@ -290,8 +278,7 @@ TEST_CASE(convolution_shape)
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_lower},
{"use_dynamic_same_auto_pad", true}}),
{"padding_mode", migraphx::op::padding_mode_t::same_lower}}),
input_dyn_shape,
weights_shape);
}
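Note: same_upper / same_lower refer to the usual auto-padding rule (our summary, not text from this commit): per spatial axis, total_padding = max((ceil(in / stride) - 1) * stride + (kernel - 1) * dilation + 1 - in, 0), with same_upper placing the odd leftover element at the end of the axis and same_lower at the beginning. The removed use_dynamic_same_auto_pad flag appears to have gated this computation for dynamic shapes; after this change the padding_mode attribute alone selects it.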
......
@@ -29,7 +29,6 @@
#include <migraphx/module.hpp>
#include <sstream>
#include <string>
-#include <migraphx/make_op.hpp>
#include <migraphx/serialize.hpp>
......
This diff is collapsed.
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/rewrite_batchnorm.hpp>
#include <migraphx/program.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/ranges.hpp>
#include <test.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/verify.hpp>
bool is_batch_norm(migraphx::instruction& ins) { return ins.name() == "batch_norm_inference"; }
TEST_CASE(fwd_conv_batchnorm_rewrite_test)
{
std::vector<float> xdata = {
0.26485917, 0.61703885, 0.32762103, 0.2503367, 0.6552712, 0.07947932, 0.95442678,
0.70892651, 0.890563, 0.80808088, 0.89540492, 0.52657048, 0.94614791, 0.64371508,
0.0971229, 0.2475562, 0.47405955, 0.85538928, 0.05428386, 0.993078, 0.72771973,
0.18312255, 0.3091522, 0.51396558, 0.35158192, 0.2419852, 0.83691474, 0.36355352,
0.04769134, 0.08312604, 0.61804092, 0.0508887, 0.30987137, 0.81307629, 0.16398955,
0.69886166, 0.02415926, 0.60608918, 0.81907569, 0.13208211, 0.48303735, 0.87533734,
0.92998813, 0.65553674, 0.73223327, 0.99401001, 0.09850688, 0.76972609, 0.11118327,
0.04392097, 0.39252306, 0.91129653, 0.89078693, 0.60571206, 0.98410397, 0.15290698,
0.86992609, 0.7575111, 0.80583525, 0.23649562, 0.7478029, 0.62888878, 0.39886601,
0.37066793, 0.72627947, 0.8745595, 0.13568234, 0.7413787, 0.5039495, 0.18945697,
0.87046838, 0.63970494, 0.01124038, 0.27459063, 0.65745586, 0.69182619, 0.80470603,
0.58039348, 0.36950583, 0.43634225, 0.01694425, 0.14099377, 0.77015849, 0.35809292,
0.40547674, 0.46538817, 0.65835358, 0.2266954, 0.39057646, 0.64642207, 0.84491134,
0.20998067, 0.41074121, 0.73055221, 0.26424874, 0.10612507, 0.24478521, 0.24091282,
0.52536754, 0.57292341, 0.82190903, 0.51858515, 0.17162996, 0.52048114, 0.96624787,
0.17527163, 0.56384485, 0.91991603};
std::vector<float> wdata = {
-1.12125056, 0.50228441, 1.12719446, -2.61705068, -0.2027315, -0.82199441, 0.05337102,
-0.62146691, -2.40572931, -1.47175612, 1.49654601, -1.07070376, -0.65908074, -0.28457694,
1.60046717, 0.20677642, -1.51844486, 0.41203847, -0.01285751, 0.07948031, -0.91507006,
-1.59481079, -0.12856238, 0.39970482, -1.89015158, 0.66969754, 0.10312618};
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 6, 6}};
migraphx::shape ws{migraphx::shape::float_type, {1, 3, 3, 3}};
migraphx::shape vars{migraphx::shape::float_type, {1}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_literal(xs, xdata);
auto w = mm->add_literal(ws, wdata);
auto conv = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
x,
w);
auto scale = mm->add_literal(migraphx::literal{vars, {3.0f}});
auto bias = mm->add_literal(migraphx::literal{vars, {8.1f}});
auto mean = mm->add_literal(migraphx::literal{vars, {4.0f}});
auto variance = mm->add_literal(migraphx::literal{vars, {37.11f}});
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
std::vector<float> results_vector1;
std::vector<float> results_vector2;
result1.visit([&](auto output) { results_vector1.assign(output.begin(), output.end()); });
result2.visit([&](auto output) { results_vector2.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector1, results_vector2));
}
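Note: the equivalence this test checks rests on simple algebra (a sketch of the idea, not the pass's literal code): with a = scale / sqrt(variance + epsilon), batchnorm(conv(x, w)) = a * conv(x, w) + (bias - a * mean) = conv(x, a * w) + (bias - a * mean), because convolution is linear in its weights. The batchnorm therefore folds into rescaled weights plus a constant bias, which is why p1 and p2 must produce identical results.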
TEST_CASE(non_literal)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 8}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", xs);
auto w = mm->add_parameter("w", ws);
auto conv = mm->add_instruction(migraphx::make_op("convolution"), x, w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
}
TEST_CASE(as_literal)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 8}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_literal(migraphx::generate_literal(xs, 1));
auto w = mm->add_literal(migraphx::generate_literal(ws, 1));
auto conv = mm->add_instruction(migraphx::make_op("convolution"), x, w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
TEST_CASE(as_literal_1d)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_literal(migraphx::generate_literal(xs, 1));
auto w = mm->add_literal(migraphx::generate_literal(ws, 1));
auto conv = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0}}, {"stride", {1}}, {"dilation", {1}}}),
x,
w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
TEST_CASE(as_literal_3d)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 2, 4, 8}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::op::convolution conv_op;
conv_op.padding = {0, 0, 0};
conv_op.stride = {1, 1, 1};
conv_op.dilation = {1, 1, 1};
auto x = mm->add_literal(migraphx::generate_literal(xs, 1));
auto w = mm->add_literal(migraphx::generate_literal(ws, 1));
auto conv = mm->add_instruction(conv_op, x, w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
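Note: this 3-D case builds the op via the migraphx::op::convolution struct; the equivalent make_op spelling, mirroring literal_reshape_per_actv below, would be (a sketch, not code from this commit):

auto conv = mm->add_instruction(
    migraphx::make_op("convolution",
                      {{"padding", {0, 0, 0}}, {"stride", {1, 1, 1}}, {"dilation", {1, 1, 1}}}),
    x,
    w);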
TEST_CASE(literal_reshape)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 8}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_literal(migraphx::generate_literal(xs, 1));
auto w = mm->add_literal(migraphx::generate_literal(ws, 1));
auto conv = mm->add_instruction(migraphx::make_op("convolution"), x, w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
TEST_CASE(literal_reshape_per_actv)
{
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 7, 4}};
migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1, 1}};
migraphx::shape vars{migraphx::shape::float_type, {4, 8, 7, 4}};
auto create_program = [&]() {
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_literal(migraphx::generate_literal(xs, 1));
auto w = mm->add_literal(migraphx::generate_literal(ws, 1));
auto conv = mm->add_instruction(
migraphx::make_op(
"convolution",
{{"padding", {0, 0, 0}}, {"stride", {1, 1, 1}}, {"dilation", {1, 1, 1}}}),
x,
w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1.0e-5},
{"momentum", 0.88},
{"bn_mode",
migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
conv,
scale,
bias,
mean,
variance);
return p;
};
migraphx::program p1 = create_program();
migraphx::program p2 = create_program();
migraphx::rewrite_batchnorm opt;
opt.apply(*p2.get_main_module());
EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
p1.compile(migraphx::ref::target{});
p2.compile(migraphx::ref::target{});
auto result1 = p1.eval({}).back();
auto result2 = p2.eval({}).back();
visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
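Note: in per_activation mode the statistics carry one value per channel and per spatial position rather than one per channel, which is why vars here is {4, 8, 7, 4} (the conv output's non-batch dimensions: 4 output channels over an 8x7x4 volume) instead of {4}. Elementwise, this reads as y[n, c, i, j, k] = scale[c, i, j, k] * (x[n, c, i, j, k] - mean[c, i, j, k]) / sqrt(variance[c, i, j, k] + epsilon) + bias[c, i, j, k] (our reading of the test's shapes).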
int main(int argc, const char* argv[]) { test::run(argc, argv); }
This diff is collapsed.
@@ -33,7 +33,6 @@
#include <migraphx/matcher.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/verify.hpp>
-#include <migraphx/ref/target.hpp>
#include <migraphx/apply_alpha_beta.hpp>
bool is_convolution(const migraphx::instruction& ins) { return ins.name() == "convolution"; }
......
@@ -120,19 +120,45 @@ def batchnorm_test(g1):
with g1.as_default():
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 16, 16, 32),
-name='0')
-g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
-g1_offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='2')
-g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='3')
+name='x')
+g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
+g1_offset = tf.compat.v1.placeholder(tf.float32,
+shape=(32),
+name='bias')
+g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
g1_variance = tf.compat.v1.placeholder(tf.float32,
shape=(32),
-name='4')
+name='variance')
tf.compat.v1.nn.fused_batch_norm(x=g1_input,
scale=g1_scale,
offset=g1_offset,
mean=g1_mean,
variance=g1_variance,
-epsilon=0.00001,
+epsilon=1e-4,
is_training=False,
name='batchnorm1')
+@tf_test
+def batchnorm_half_test(g1):
+with g1.as_default():
+g1_input = tf.compat.v1.placeholder(tf.float16,
+shape=(1, 16, 16, 32),
+name='x')
+g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
+g1_offset = tf.compat.v1.placeholder(tf.float32,
+shape=(32),
+name='bias')
+g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
+g1_variance = tf.compat.v1.placeholder(tf.float32,
+shape=(32),
+name='variance')
+tf.compat.v1.nn.fused_batch_norm(x=g1_input,
+scale=g1_scale,
+offset=g1_offset,
+mean=g1_mean,
+variance=g1_variance,
+epsilon=1e-4,
+is_training=False,
+name='batchnorm1')
@@ -142,19 +168,21 @@ def batchnormv3_test(g1):
with g1.as_default():
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 16, 16, 32),
-name='0')
-g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
-g1_offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='2')
-g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='3')
+name='x')
+g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
+g1_offset = tf.compat.v1.placeholder(tf.float32,
+shape=(32),
+name='bias')
+g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
g1_variance = tf.compat.v1.placeholder(tf.float32,
shape=(32),
-name='4')
+name='variance')
tf.raw_ops.FusedBatchNormV3(x=g1_input,
scale=g1_scale,
offset=g1_offset,
mean=g1_mean,
variance=g1_variance,
-epsilon=0.00001,
+epsilon=1e-6,
is_training=False,
name='batchnorm1')
......
@@ -24,6 +24,7 @@
#include <iostream>
#include <vector>
#include <unordered_map>
+#include <migraphx/common.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/simplify_reshapes.hpp>
@@ -33,7 +34,6 @@
#include <migraphx/instruction.hpp>
#include <migraphx/tf.hpp>
#include <migraphx/make_op.hpp>
-#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/reduce_mean.hpp>
#include <migraphx/op/pooling.hpp>
@@ -186,50 +186,94 @@ TEST_CASE(batchmatmul_test)
TEST_CASE(batchnorm_test)
{
-float epsilon = 1.001e-5f;
-float momentum = 0.9f;
migraphx::program p;
auto* mm = p.get_main_module();
-migraphx::op::batch_norm_inference op{
-epsilon, momentum, migraphx::op::batch_norm_inference::spatial};
-migraphx::shape s0{migraphx::shape::float_type, {32}};
-auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 32, 16, 16}});
-std::vector<float> const_vals(32);
-std::fill(const_vals.begin(), const_vals.end(), 1.0f);
-auto l2 = mm->add_parameter("2", s0);
-auto l3 = mm->add_parameter("3", s0);
-auto l4 = mm->add_parameter("4", s0);
-auto l1 = mm->add_literal(migraphx::literal{s0, const_vals});
-mm->add_instruction(op, l0, l1, l2, l3, l4);
+auto x = mm->add_parameter("x", {migraphx::shape::float_type, {1, 32, 16, 16}});
+auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {32}});
+auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {32}});
+auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {32}});
+std::vector<float> scale_data(32, 1.0);
+auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
+auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-4f}});
+auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
+auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
+auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_tf("batchnorm_test.pb", true);
EXPECT(p == prog);
}
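Note: the rewritten expectation above spells batchnorm out as elementwise primitives: unsqueeze with axes {1, 2} turns each {32} statistic into {32, 1, 1} so it broadcasts across the NCHW input, pow with exponent rt = 0.5 forms sqrt(variance + eps), and the sub/div/mul/add chain computes (x - mean) / sqrt(variance + eps) * scale + bias. We take add_common_op to be the helper that inserts the broadcasts needed to make operand shapes agree (an assumption; its definition is not part of this diff).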
+TEST_CASE(batchnorm_half_test)
+{
+migraphx::program p;
+auto* mm = p.get_main_module();
+auto x = mm->add_parameter("x", {migraphx::shape::half_type, {1, 32, 16, 16}});
+auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {32}});
+auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {32}});
+auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {32}});
+std::vector<float> scale_data(32, 1.0);
+auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
+auto rt = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
+auto eps = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-4f}});
+auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
+auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
+auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
+auto prog = optimize_tf("batchnorm_half_test.pb", true);
+EXPECT(p == prog);
+}
TEST_CASE(batchnormv3_test)
{
-float epsilon = 1.0e-5f;
-float momentum = 0.9f;
migraphx::program p;
auto* mm = p.get_main_module();
-migraphx::op::batch_norm_inference op{
-epsilon, momentum, migraphx::op::batch_norm_inference::spatial};
-migraphx::shape s0{migraphx::shape::float_type, {32}};
-auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 32, 16, 16}});
-std::vector<float> const_vals(32);
-std::fill(const_vals.begin(), const_vals.end(), 1.0f);
-auto l2 = mm->add_parameter("2", s0);
-auto l3 = mm->add_parameter("3", s0);
-auto l4 = mm->add_parameter("4", s0);
-auto l1 = mm->add_literal(migraphx::literal{s0, const_vals});
-mm->add_instruction(op, l0, l1, l2, l3, l4);
-auto prog = optimize_tf("batchnormv3_test.pb", true);
auto x = mm->add_parameter("x", {migraphx::shape::float_type, {1, 32, 16, 16}});
auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {32}});
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {32}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {32}});
std::vector<float> scale_data(32, 1.0);
auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_tf("batchnormv3_test.pb", true);
EXPECT(p == prog);
}
@@ -327,10 +371,9 @@ migraphx::program create_conv()
mm->add_literal(migraphx::shape{migraphx::shape::float_type, {3, 3, 3, 32}}, weight_data);
migraphx::op::convolution op;
op.padding_mode = migraphx::op::padding_mode_t::same;
-op.padding = {1, 1, 1, 1};
-op.stride = {1, 1};
-op.dilation = {1, 1};
+op.padding = {1, 1, 1, 1};
+op.stride = {1, 1};
+op.dilation = {1, 1};
auto l2 =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {3, 2, 0, 1}}}), l1);
mm->add_instruction(op, l0, l2);
@@ -406,11 +449,10 @@ TEST_CASE(depthwiseconv_test)
mm->add_literal(migraphx::shape{migraphx::shape::float_type, {3, 3, 3, 1}}, weight_data);
migraphx::op::convolution op;
op.padding_mode = migraphx::op::padding_mode_t::same;
-op.padding = {1, 1};
-op.stride = {1, 1};
-op.dilation = {1, 1};
-op.group = 3;
+op.padding = {1, 1};
+op.stride = {1, 1};
+op.dilation = {1, 1};
+op.group = 3;
auto l3 =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {3, 2, 0, 1}}}), l1);
auto l4 = mm->add_instruction(migraphx::make_op("contiguous"), l3);
......
@@ -25,20 +25,21 @@
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
-#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/make_op.hpp>
-struct quant_conv_valid_mode : verify_program<quant_conv_valid_mode>
+struct quant_conv_1d : verify_program<quant_conv_1d>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
-migraphx::shape a_shape{migraphx::shape::int8_type, {2, 3, 4, 4}};
+migraphx::shape a_shape{migraphx::shape::int8_type, {2, 3, 4}};
auto pa = mm->add_parameter("a", a_shape);
-migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3, 3}};
+migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3}};
auto pc = mm->add_parameter("c", c_shape);
mm->add_instruction(
-migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::valid},
+migraphx::make_op("quant_convolution",
+{{"padding", {0}}, {"stride", {1}}, {"dilation", {1}}}),
pa,
pc);
return p;
......
@@ -37,10 +37,7 @@ struct quant_conv_default_mode : verify_program<quant_conv_default_mode>
auto pa = mm->add_parameter("a", a_shape);
migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3, 3}};
auto pc = mm->add_parameter("c", c_shape);
-mm->add_instruction(
-migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
-pa,
-pc);
+mm->add_instruction(migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}}, pa, pc);
return p;
}
};
@@ -37,10 +37,7 @@ struct quant_conv_int8x4_default : verify_program<quant_conv_int8x4_default>
auto pa = mm->add_parameter("a", a_shape);
migraphx::shape c_shape{migraphx::shape::int8_type, {16, 16, 3, 3}};
auto pc = mm->add_parameter("c", c_shape);
-mm->add_instruction(
-migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
-pa,
-pc);
+mm->add_instruction(migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}}, pa, pc);
return p;
}
};
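For symmetry with the quant_conv_1d hunk above, the remaining op-struct one-liners could also be written via make_op; a sketch reusing the attribute names from that hunk (not code from this commit):

mm->add_instruction(
    migraphx::make_op("quant_convolution",
                      {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
    pa,
    pc);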
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_1d : verify_program<test_batchnorm_1d>
{
const size_t size = 3;
const size_t channels = 3;
const size_t batches = 4;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, size}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
return p;
}
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
struct test_batchnorm_1d_per_actv : verify_program<test_batchnorm_1d_per_actv>
{
const size_t d1 = 5;
const size_t channels = 2;
const size_t batches = 3;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1}};
migraphx::shape vars{migraphx::shape::float_type, {channels, d1}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1.0e-5},
{"momentum", 0.96f},
{"bn_mode",
migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
x,
scale,
bias,
mean,
variance);
return p;
}
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
struct test_batchnorm_2d_per_actv : verify_program<test_batchnorm_2d_per_actv>
{
const size_t d1 = 2;
const size_t d2 = 4;
const size_t channels = 2;
const size_t batches = 3;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2}};
migraphx::shape vars{migraphx::shape::float_type, {channels, d1, d2}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1.0e-6},
{"momentum", 0.9f},
{"bn_mode",
migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
x,
scale,
bias,
mean,
variance);
return p;
}
};
This diff is collapsed.