Commit 02395097 authored by Aditya Atluri

Used clang-format to clean up

parent 121b3130
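The hunks below are whitespace-only rewraps of long lines, which is the kind of change a clang-format pass produces automatically, for example by running clang-format -i -style=file over the touched sources; the exact style settings and file list used for this commit are not recorded here and are assumptions.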
@@ -51,9 +51,10 @@ struct cpu_batch_norm_inference
         auto gamma = args[3].at<float>();
         auto beta = args[4].at<float>();
-        visit_all(output, input) ([&](auto result, auto buffer) {
+        visit_all(output, input)([&](auto result, auto buffer) {
             std::transform(buffer.begin(), buffer.end(), result.begin(), [&](auto x) {
-                return gamma * (x - mini_batch_mean) / std::sqrt(mini_batch_variance + epsilon) + beta;
+                return gamma * (x - mini_batch_mean) / std::sqrt(mini_batch_variance + epsilon) +
+                       beta;
             });
         });
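For reference, the expression being rewrapped above is the standard batch-norm inference formula y = gamma * (x - mean) / sqrt(variance + epsilon) + beta. A standalone sketch of that per-element computation follows; the parameter values are illustrative placeholders and are not taken from the migraph sources or this commit.

    // Per-element batch-norm inference: y = gamma * (x - mean) / sqrt(var + eps) + beta.
    // All values below are illustrative placeholders, not migraph test data.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main()
    {
        const float gamma = 1.0f, beta = 0.0f;
        const float mean = 0.0f, variance = 1.0f, epsilon = 1.0e-6f;
        std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f};
        for(float x : input)
        {
            float y = gamma * (x - mean) / std::sqrt(variance + epsilon) + beta;
            std::printf("%g -> %g\n", x, y);
        }
        return 0;
    }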
@@ -515,7 +516,8 @@ struct cpu_apply
     {
         apply_map["convolution"] = extend_op<cpu_convolution, convolution>();
         apply_map["gemm"] = extend_op<cpu_gemm, gemm>();
-        apply_map["batch_norm_inference"] = extend_op<cpu_batch_norm_inference, batch_norm_inference>();
+        apply_map["batch_norm_inference"] =
+            extend_op<cpu_batch_norm_inference, batch_norm_inference>();
         apply_map["reshape"] = extend_op<cpu_reshape, reshape>();
         apply_map["contiguous"] = extend_op<cpu_contiguous, contiguous>();
         apply_map["transpose"] = extend_op<cpu_transpose, transpose>();
@@ -19,8 +19,9 @@ void batch_norm_inference_test()
     p.compile(migraph::cpu::cpu_target{});
     auto result = p.eval({});
     std::vector<float> result_vector(4);
-    result.visit([&](auto output) {result_vector.assign(output.begin(), output.end()); });
-    std::vector<float> gold = { 1 / (1 + 1.0e-6), 2 / (1 + 1.0e-6), 3 / (1 + 1.0e-6), 4 / (1 + 1.0e-6)};
+    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold = {
+        1 / (1 + 1.0e-6), 2 / (1 + 1.0e-6), 3 / (1 + 1.0e-6), 4 / (1 + 1.0e-6)};
     EXPECT(test::verify_range(result_vector, gold));
 }
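The test above compares the evaluated output against a hand-computed gold vector with test::verify_range. A generic sketch of that kind of elementwise tolerance check follows; the real helper's name, signature, and tolerance policy in the migraph test framework may differ.

    // Elementwise tolerance comparison in the spirit of test::verify_range.
    // The tolerance value and signature here are assumptions for illustration.
    #include <cmath>
    #include <cstddef>
    #include <vector>

    bool approx_equal_range(const std::vector<float>& a,
                            const std::vector<float>& b,
                            float tol = 1.0e-5f)
    {
        if(a.size() != b.size())
            return false;
        for(std::size_t i = 0; i < a.size(); ++i)
        {
            if(std::fabs(a[i] - b[i]) > tol)
                return false;
        }
        return true;
    }

    int main()
    {
        std::vector<float> a = {1.0f, 2.0f};
        std::vector<float> b = {1.0f, 2.0f + 1.0e-7f};
        return approx_equal_range(a, b) ? 0 : 1; // exit code 0 on match
    }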