Commit e08b425f authored by charlie

Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into refactor_dynamic_compute

parents fbe13c96 5fa42993
@@ -394,6 +394,31 @@ TEST_CASE(batch_norm_flat_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(batch_norm_rank_2_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto x = mm->add_parameter("x", {migraphx::shape::float_type, {2, 5}});
+    auto scale = mm->add_parameter("scale", {migraphx::shape::float_type, {5}});
+    auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {5}});
+    auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {5}});
+    auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {5}});
+    auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+    auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
+    auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
+    auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
+    auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+    auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+    auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, scale});
+    add_common_op(*mm, migraphx::make_op("add"), {r0, bias});
+    auto prog = optimize_onnx("batch_norm_rank_2_test.onnx");
+    EXPECT(p == prog);
+}
+
 TEST_CASE(batch_norm_1d_test)
 {
     migraphx::program p;
......
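Reviewer note: the parse test above pins down how the ONNX BatchNormalization node is now lowered for rank-2 inputs: a chain of broadcast elementwise ops (sub, add, pow, div, mul, add) instead of a single batch_norm_inference op. A minimal numpy sketch of that computation, with illustrative names, assuming per-channel stats broadcast over the trailing axis:

    import numpy as np

    def batch_norm_rank_2(x, scale, bias, mean, var, eps=1e-6):
        numer = x - mean                # "sub"
        denom = (var + eps) ** 0.5      # "add", then "pow" with literal 0.5
        return (numer / denom) * scale + bias  # "div", "mul", "add"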
@@ -115,6 +115,43 @@ TEST_CASE(batch_norm_flat_test)
     EXPECT(migraphx::verify_range(result_vector, gold));
 }
 
+TEST_CASE(batch_norm_rank_2_test)
+{
+    migraphx::program p = migraphx::parse_onnx("batch_norm_rank_2_test.onnx");
+    p.compile(migraphx::ref::target{});
+
+    migraphx::shape x_shape{migraphx::shape::float_type, {2, 5}};
+    migraphx::shape c_shape{migraphx::shape::float_type, {5}};
+    std::vector<float> x_data = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
+    std::vector<float> scale_data(5, 1.);
+    std::vector<float> bias_data(5, 0.);
+    std::vector<float> mean_data = {1., 2., 1., 2., 1.};
+    std::vector<float> variance_data(5, 0.5);
+
+    migraphx::parameter_map params;
+    params["x"]        = migraphx::argument(x_shape, x_data.data());
+    params["scale"]    = migraphx::argument(c_shape, scale_data.data());
+    params["bias"]     = migraphx::argument(c_shape, bias_data.data());
+    params["mean"]     = migraphx::argument(c_shape, mean_data.data());
+    params["variance"] = migraphx::argument(c_shape, variance_data.data());
+
+    auto result = p.eval(params).back();
+    std::vector<float> result_vector;
+    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
+
+    std::vector<float> gold = {0.,
+                               0.,
+                               2.8284243,
+                               2.8284243,
+                               5.65684859,
+                               7.07106074,
+                               7.07106074,
+                               9.89948504,
+                               9.89948504,
+                               12.72790933};
+    EXPECT(migraphx::verify_range(result_vector, gold));
+}
+
 TEST_CASE(batch_norm_1d_test)
 {
     migraphx::program p = migraphx::parse_onnx("batch_norm_1d_test.onnx");
......
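Reviewer note: the gold vector checks out independently; a quick numpy reproduction under the same inputs (scale = 1, bias = 0, and eps = 1e-6 as in the parse test literal):

    import numpy as np

    x = np.arange(1., 11.).reshape(2, 5)
    mean = np.array([1., 2., 1., 2., 1.])
    var = np.full(5, 0.5)
    gold = (x - mean) / np.sqrt(var + 1e-6)  # scale=1 and bias=0 drop out
    # row 0: [0, 0, 2.8284243, 2.8284243, 5.6568486]
    # row 1: [7.0710607, 7.0710607, 9.8994850, 9.8994850, 12.7279093]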
@@ -81,16 +81,6 @@ void throws_shape(const migraphx::shape&, Ts...)
         "An expected shape should not be passed to throws_shape function");
 }
 
-TEST_CASE(batch_norm_inference_shape)
-{
-    const size_t channels = 3;
-    migraphx::shape s{migraphx::shape::float_type, {4, channels, 3, 3}};
-    migraphx::shape vars{migraphx::shape::float_type, {channels}};
-    expect_shape(s, migraphx::make_op("batch_norm_inference"), s, vars, vars, vars, vars);
-    throws_shape(migraphx::make_op("batch_norm_inference"), s);
-    throws_shape(migraphx::make_op("batch_norm_inference"), s, vars, vars, vars, vars, vars);
-}
-
 TEST_CASE(broadcast)
 {
     {
......
@@ -120,19 +120,45 @@ def batchnorm_test(g1):
     with g1.as_default():
         g1_input = tf.compat.v1.placeholder(tf.float32,
                                             shape=(1, 16, 16, 32),
-                                            name='0')
-        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
-        g1_offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='2')
-        g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='3')
+                                            name='x')
+        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
+        g1_offset = tf.compat.v1.placeholder(tf.float32,
+                                             shape=(32),
+                                             name='bias')
+        g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
         g1_variance = tf.compat.v1.placeholder(tf.float32,
                                                shape=(32),
+                                               name='variance')
+        tf.compat.v1.nn.fused_batch_norm(x=g1_input,
+                                         scale=g1_scale,
+                                         offset=g1_offset,
+                                         mean=g1_mean,
+                                         variance=g1_variance,
+                                         epsilon=1e-4,
+                                         is_training=False,
+                                         name='batchnorm1')
+
+
+@tf_test
+def batchnorm_half_test(g1):
+    with g1.as_default():
+        g1_input = tf.compat.v1.placeholder(tf.float16,
+                                            shape=(1, 16, 16, 32),
+                                            name='x')
+        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
+        g1_offset = tf.compat.v1.placeholder(tf.float32,
+                                             shape=(32),
+                                             name='bias')
+        g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
+        g1_variance = tf.compat.v1.placeholder(tf.float32,
+                                               shape=(32),
-                                               name='4')
+                                               name='variance')
         tf.compat.v1.nn.fused_batch_norm(x=g1_input,
                                          scale=g1_scale,
                                          offset=g1_offset,
                                          mean=g1_mean,
                                          variance=g1_variance,
-                                         epsilon=0.00001,
+                                         epsilon=1e-4,
                                          is_training=False,
                                          name='batchnorm1')
@@ -142,19 +168,21 @@ def batchnormv3_test(g1):
     with g1.as_default():
         g1_input = tf.compat.v1.placeholder(tf.float32,
                                             shape=(1, 16, 16, 32),
-                                            name='0')
-        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
-        g1_offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='2')
-        g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='3')
+                                            name='x')
+        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
+        g1_offset = tf.compat.v1.placeholder(tf.float32,
+                                             shape=(32),
+                                             name='bias')
+        g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
         g1_variance = tf.compat.v1.placeholder(tf.float32,
                                                shape=(32),
-                                               name='4')
+                                               name='variance')
         tf.raw_ops.FusedBatchNormV3(x=g1_input,
                                     scale=g1_scale,
                                     offset=g1_offset,
                                     mean=g1_mean,
                                     variance=g1_variance,
-                                    epsilon=0.00001,
+                                    epsilon=1e-6,
                                     is_training=False,
                                     name='batchnorm1')
......
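Reviewer note: for anyone regenerating the .pb fixtures by hand, a hedged sketch of how one of these graphs can be serialized (this may differ from what the repo's tf_test harness actually does; the output file name is illustrative). The graph holds only placeholders and a constant, so no variable-freezing pass is needed before writing the GraphDef:

    import tensorflow as tf

    g = tf.Graph()
    with g.as_default():
        x = tf.compat.v1.placeholder(tf.float32, shape=(1, 16, 16, 32), name='x')
        scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
        offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='bias')
        mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
        variance = tf.compat.v1.placeholder(tf.float32, shape=(32), name='variance')
        tf.compat.v1.nn.fused_batch_norm(x=x, scale=scale, offset=offset,
                                         mean=mean, variance=variance,
                                         epsilon=1e-4, is_training=False,
                                         name='batchnorm1')

    # Serialize the GraphDef next to the test scripts.
    tf.io.write_graph(g.as_graph_def(), '.', 'batchnorm_test.pb', as_text=False)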