Unverified Commit 417a2849 authored by Paul Fultz II's avatar Paul Fultz II Committed by GitHub
Browse files

Merge pull request #17 from adityaatluri/batch_norm_cpu

added batch norm inference and its test 
parents 1e66a536 02395097
...@@ -103,6 +103,24 @@ struct not_computable ...@@ -103,6 +103,24 @@ struct not_computable
} }
}; };
// Operator description for batch normalization in inference mode.
// This struct only carries the op's metadata (name, shape rule, epsilon);
// its compute() always throws, so a target-specific implementation
// (e.g. cpu_batch_norm_inference below) must replace it before evaluation.
struct batch_norm_inference
{
    // Stability constant added to the variance before the square root.
    double epsilon = 1.0e-6;
    std::string name() const { return "batch_norm_inference"; }
    // Requires exactly 5 inputs — data, mean, variance, gamma, beta —
    // and the output shape equals the input data shape (inputs[0]).
    shape compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        return inputs.front();
    }
    // Not computable at this level; presumably lowered to a target op
    // (see cpu_batch_norm_inference) during compilation.
    argument compute(context&, shape, std::vector<argument>) const
    {
        MIGRAPH_THROW("not computable");
    }
};
struct convolution struct convolution
{ {
std::array<std::size_t, 2> padding = {{0, 0}}; std::array<std::size_t, 2> padding = {{0, 0}};
......
...@@ -16,6 +16,52 @@ T zero(const T&) ...@@ -16,6 +16,52 @@ T zero(const T&)
return T(0); return T(0);
} }
//
// cpu implementation of batch norm for inference
//
// inputs are:
// args[0] -> input data buffer
// args[1] -> mini batch mean
// args[2] -> mini batch variance
// args[3] -> gamma
// args[4] -> beta
//
// The equation to compute batch norm for inference is:
//
// output[i] = beta + gamma * (input[i] - mean) / sqrt(variance + epsilon)
//
// the input data format should be nchw
//
// CPU implementation of batch normalization in inference mode.
// Each element of the input is normalized with the saved mini-batch
// statistics:  out = beta + gamma * (x - mean) / sqrt(variance + epsilon)
//
// NOTE(review): mean/variance/gamma/beta are read through at<float>(),
// i.e. only the first element of each buffer is used — the statistics are
// effectively scalars, not per-channel values as the nchw comment above
// would suggest. Confirm whether per-channel handling is intended.
struct cpu_batch_norm_inference
{
    batch_norm_inference op;
    std::string name() const { return "cpu::batch_norm_inference"; }
    shape compute_shape(std::vector<shape> inputs) const { return op.compute_shape(inputs); }
    argument compute(context&, shape output_shape, std::vector<argument> args) const
    {
        argument result{output_shape};
        // Kept as double so the normalization below is evaluated in
        // double precision, matching the op's stored epsilon.
        double eps = op.epsilon;
        auto data     = args[0];
        auto mean     = args[1].at<float>();
        auto variance = args[2].at<float>();
        auto scale    = args[3].at<float>(); // gamma
        auto shift    = args[4].at<float>(); // beta
        visit_all(result, data)([&](auto out, auto in) {
            std::transform(in.begin(), in.end(), out.begin(), [&](auto v) {
                return shift + scale * (v - mean) / std::sqrt(variance + eps);
            });
        });
        return result;
    }
};
struct cpu_convolution struct cpu_convolution
{ {
convolution op; convolution op;
...@@ -470,9 +516,11 @@ struct cpu_apply ...@@ -470,9 +516,11 @@ struct cpu_apply
{ {
apply_map["convolution"] = extend_op<cpu_convolution, convolution>(); apply_map["convolution"] = extend_op<cpu_convolution, convolution>();
apply_map["gemm"] = extend_op<cpu_gemm, gemm>(); apply_map["gemm"] = extend_op<cpu_gemm, gemm>();
apply_map["reshape"] = extend_op<cpu_reshape, reshape>(); apply_map["batch_norm_inference"] =
apply_map["contiguous"] = extend_op<cpu_contiguous, contiguous>(); extend_op<cpu_batch_norm_inference, batch_norm_inference>();
apply_map["transpose"] = extend_op<cpu_transpose, transpose>(); apply_map["reshape"] = extend_op<cpu_reshape, reshape>();
apply_map["contiguous"] = extend_op<cpu_contiguous, contiguous>();
apply_map["transpose"] = extend_op<cpu_transpose, transpose>();
apply_map["identity"] = simple_op<cpu_unary<identity_op>>(); apply_map["identity"] = simple_op<cpu_unary<identity_op>>();
apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>(); apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>();
......
...@@ -6,6 +6,25 @@ ...@@ -6,6 +6,25 @@
#include <cmath>
#include "test.hpp"
#include "verify.hpp"
// Verifies batch_norm_inference end-to-end on the CPU target using
// identity-like statistics (gamma = 1, beta = 0, mean = 0, variance = 1),
// for which the op reduces to  output[i] = x[i] / sqrt(1 + epsilon).
void batch_norm_inference_test()
{
    migraph::program p;
    // Must match batch_norm_inference's default epsilon (1.0e-6).
    const double epsilon = 1.0e-6;
    migraph::shape s{migraph::shape::float_type, {4}};
    auto x        = p.add_literal(migraph::literal{s, {1, 2, 3, 4}});
    auto gamma    = p.add_literal(migraph::literal{s, {1}});
    auto beta     = p.add_literal(migraph::literal{s, {0}});
    auto mean     = p.add_literal(migraph::literal{s, {0}});
    auto variance = p.add_literal(migraph::literal{s, {1}});
    p.add_instruction(migraph::batch_norm_inference{}, x, mean, variance, gamma, beta);
    p.compile(migraph::cpu::cpu_target{});
    auto result = p.eval({});
    std::vector<float> result_vector(4);
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    // The op divides by sqrt(variance + epsilon); the previous gold values
    // used (1 + epsilon) without the square root and only passed because
    // verify_range's tolerance absorbed the ~5e-7 relative error.
    const double denom = std::sqrt(1 + epsilon);
    std::vector<float> gold = {static_cast<float>(1 / denom),
                               static_cast<float>(2 / denom),
                               static_cast<float>(3 / denom),
                               static_cast<float>(4 / denom)};
    EXPECT(test::verify_range(result_vector, gold));
}
void exp_test() void exp_test()
{ {
migraph::program p; migraph::program p;
...@@ -625,4 +644,5 @@ int main() ...@@ -625,4 +644,5 @@ int main()
conv2d_test(); conv2d_test();
conv2d_padding_test(); conv2d_padding_test();
conv2d_padding_stride_test(); conv2d_padding_stride_test();
batch_norm_inference_test();
} }
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment