Commit 02395097 authored by Aditya Atluri

used clang format to clean up

parent 121b3130
@@ -44,18 +44,19 @@ struct cpu_batch_norm_inference
     {
         argument output{output_shape};
-        double epsilon = op.epsilon;
-        auto input = args[0];
-        auto mini_batch_mean = args[1].at<float>();
+        double epsilon           = op.epsilon;
+        auto input               = args[0];
+        auto mini_batch_mean     = args[1].at<float>();
         auto mini_batch_variance = args[2].at<float>();
-        auto gamma = args[3].at<float>();
-        auto beta = args[4].at<float>();
+        auto gamma               = args[3].at<float>();
+        auto beta                = args[4].at<float>();
-        visit_all(output, input) ([&](auto result, auto buffer) {
+        visit_all(output, input)([&](auto result, auto buffer) {
             std::transform(buffer.begin(), buffer.end(), result.begin(), [&](auto x) {
-                return gamma * (x - mini_batch_mean) / std::sqrt(mini_batch_variance + epsilon) + beta;
+                return gamma * (x - mini_batch_mean) / std::sqrt(mini_batch_variance + epsilon) +
+                       beta;
             });
         });
         return output;
     }
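
For reference, the lambda being rewrapped here applies the standard batch-norm inference transform, y = gamma * (x - mean) / sqrt(variance + epsilon) + beta, element-wise over the input buffer. Below is a minimal, self-contained sketch of that computation; it is illustrative only, not the MIGraphX implementation, and the function name and scalar-parameter signature are assumptions made for the example.

#include <cmath>
#include <cstdio>
#include <vector>

// Reference element-wise batch-norm inference (scalar parameters for brevity).
std::vector<float> batch_norm_inference_ref(const std::vector<float>& x,
                                            float mean,
                                            float variance,
                                            float gamma,
                                            float beta,
                                            float epsilon = 1.0e-6f)
{
    std::vector<float> y(x.size());
    for(std::size_t i = 0; i < x.size(); i++)
        y[i] = gamma * (x[i] - mean) / std::sqrt(variance + epsilon) + beta;
    return y;
}

int main()
{
    // Same inputs as the test further down: mean=0, variance=1, gamma=1, beta=0.
    for(float v : batch_norm_inference_ref({1, 2, 3, 4}, 0, 1, 1, 0))
        std::printf("%f\n", v); // approximately 1, 2, 3, 4
}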
@@ -515,10 +516,11 @@ struct cpu_apply
     {
         apply_map["convolution"] = extend_op<cpu_convolution, convolution>();
         apply_map["gemm"] = extend_op<cpu_gemm, gemm>();
-        apply_map["batch_norm_inference"] = extend_op<cpu_batch_norm_inference, batch_norm_inference>();
-        apply_map["reshape"] = extend_op<cpu_reshape, reshape>();
-        apply_map["contiguous"] = extend_op<cpu_contiguous, contiguous>();
-        apply_map["transpose"] = extend_op<cpu_transpose, transpose>();
+        apply_map["batch_norm_inference"] =
+            extend_op<cpu_batch_norm_inference, batch_norm_inference>();
+        apply_map["reshape"] = extend_op<cpu_reshape, reshape>();
+        apply_map["contiguous"] = extend_op<cpu_contiguous, contiguous>();
+        apply_map["transpose"] = extend_op<cpu_transpose, transpose>();
         apply_map["identity"] = simple_op<cpu_unary<identity_op>>();
         apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>();
......
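
The cpu_apply hunk only rewraps one entry, but the structure it touches is a name-keyed dispatch table: lowering walks the program and, for each named operation, looks up the callback that installs its CPU implementation. The sketch below shows that general pattern only, using plain std::unordered_map and std::function stand-ins; it assumes nothing about MIGraphX's actual extend_op/simple_op helpers.

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

int main()
{
    // Hypothetical registry: operation name -> action that lowers it to a CPU op.
    std::unordered_map<std::string, std::function<void()>> apply_map;

    apply_map["gemm"] = [] { std::cout << "replace gemm with cpu_gemm\n"; };
    apply_map["batch_norm_inference"] = [] {
        std::cout << "replace batch_norm_inference with cpu_batch_norm_inference\n";
    };

    // During lowering, each instruction's name selects the matching callback.
    apply_map.at("batch_norm_inference")();
}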
@@ -10,17 +10,18 @@ void batch_norm_inference_test()
 {
     migraph::program p;
     migraph::shape s{migraph::shape::float_type, {4}};
-    auto x = p.add_literal(migraph::literal{s, {1, 2, 3, 4}});
-    auto gamma = p.add_literal(migraph::literal{s, {1}});
-    auto beta = p.add_literal(migraph::literal{s, {0}});
-    auto mean = p.add_literal(migraph::literal{s, {0}});
+    auto x        = p.add_literal(migraph::literal{s, {1, 2, 3, 4}});
+    auto gamma    = p.add_literal(migraph::literal{s, {1}});
+    auto beta     = p.add_literal(migraph::literal{s, {0}});
+    auto mean     = p.add_literal(migraph::literal{s, {0}});
     auto variance = p.add_literal(migraph::literal{s, {1}});
     p.add_instruction(migraph::batch_norm_inference{}, x, mean, variance, gamma, beta);
     p.compile(migraph::cpu::cpu_target{});
     auto result = p.eval({});
     std::vector<float> result_vector(4);
-    result.visit([&](auto output) {result_vector.assign(output.begin(), output.end()); });
-    std::vector<float> gold = { 1 / (1 + 1.0e-6), 2 / (1 + 1.0e-6), 3 / (1 + 1.0e-6), 4 / (1 + 1.0e-6)};
+    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold = {
+        1 / (1 + 1.0e-6), 2 / (1 + 1.0e-6), 3 / (1 + 1.0e-6), 4 / (1 + 1.0e-6)};
     EXPECT(test::verify_range(result_vector, gold));
 }
......
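
The gold vector follows from the same formula with mean = 0, variance = 1, gamma = 1, beta = 0: the exact output is x / sqrt(1 + epsilon), while the test encodes x / (1 + epsilon). Assuming the operator's default epsilon is 1.0e-6, as the gold values suggest, the two differ by only a few parts in 10^7, which verify_range's float tolerance absorbs. A small standalone check of that claim (not part of the test):

#include <cmath>
#include <cstdio>

int main()
{
    const double eps  = 1.0e-6;
    const double xs[] = {1.0, 2.0, 3.0, 4.0};
    for(double x : xs)
    {
        double exact = x / std::sqrt(1.0 + eps); // gamma=1, mean=0, beta=0, variance=1
        double gold  = x / (1.0 + eps);          // what the test's gold vector encodes
        std::printf("x=%.0f exact=%.9f gold=%.9f diff=%.2e\n", x, exact, gold, exact - gold);
    }
}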