Commit 35b5b51f authored by Paul

Clean bn

parent 48f35aa0
......@@ -37,9 +37,6 @@ struct miopen_batch_norm_inference
float alpha = 1.0f, beta = 0.0f;
// TODO: adityaatluri
// create bn-scale-bias-mean-variance descriptor for
// miopen call
miopenBatchNormalizationForwardInference(ctx.handle.get(),
miopenBatchNormMode_t(op.bn_mode),
&alpha,
......@@ -328,8 +325,6 @@ struct miopen_apply
{
apply_contiguous(it);
}
// TODO: adityaatluri
// tagging to easily find where code changed
else if(it->op.name() == "batch_norm_inference")
{
apply_batch_norm_inference(it);
......@@ -409,29 +404,24 @@ struct miopen_apply
prog->replace_instruction(ins, miopen_contiguous{op}, ins->arguments.at(0), output);
}
// TODO: adityaatluri
// Not sure how to write this. Review and fix required
void apply_batch_norm_inference(instruction_ref ins)
{
auto&& op = any_cast<batch_norm_inference>(ins->op);
auto output = insert_allocation(ins, ins->result);
shape old_shape = ins->arguments.at(1)->get_shape();
std::vector<int64_t> new_shape{1, static_cast<int64_t>(old_shape.elements()), 1, 1};
auto arg1 =
prog->insert_instruction(ins, migraph::reshape{new_shape}, ins->arguments.at(1));
auto arg2 =
prog->insert_instruction(ins, migraph::reshape{new_shape}, ins->arguments.at(2));
auto arg3 =
prog->insert_instruction(ins, migraph::reshape{new_shape}, ins->arguments.at(3));
auto arg4 =
prog->insert_instruction(ins, migraph::reshape{new_shape}, ins->arguments.at(4));
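// reshape the per-channel batch norm arguments from their 1-D {C} shape to
// {1, C, 1, 1} so the MIOpen call receives the 4-D tensors it expects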
auto reshape_op = reshape{new_shape};
std::vector<instruction_ref> reshapes;
std::transform(ins->arguments.begin() + 1,
ins->arguments.end(),
std::back_inserter(reshapes),
[&](auto i) { return prog->insert_instruction(ins, reshape_op, i); });
prog->replace_instruction(ins,
miopen_batch_norm_inference{op},
ins->arguments.at(0),
arg1,
arg2,
arg3,
arg4,
reshapes[0],
reshapes[1],
reshapes[2],
reshapes[3],
output);
}
};
......
......@@ -281,49 +281,6 @@ struct test_batchnorm_inference
}
};
void batch_norm_inference_test()
{
migraph::program p;
const size_t width = 2, height = 2, channels = 4, batches = 2;
const float x_val = 8.0f, mean_val = 2.0f, variance_val = 4.0f, scale_val = 2.0f,
bias_val = 1.0f;
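// gold value follows the batch norm inference formula: y = scale * (x - mean) / sqrt(variance) + bias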
const float output_val = scale_val * (x_val - mean_val) / (std::sqrt(variance_val)) + bias_val;
migraph::shape s{migraph::shape::float_type, {batches, channels, height, width}};
migraph::shape vars{migraph::shape::float_type, {channels}};
std::vector<float> x_data(width * height * channels * batches);
std::vector<float> scale_data(channels);
std::vector<float> bias_data(channels);
std::vector<float> mean_data(channels);
std::vector<float> variance_data(channels);
std::fill(x_data.begin(), x_data.end(), x_val);
std::fill(mean_data.begin(), mean_data.end(), mean_val);
std::fill(variance_data.begin(), variance_data.end(), variance_val);
std::fill(scale_data.begin(), scale_data.end(), scale_val);
std::fill(bias_data.begin(), bias_data.end(), bias_val);
auto x = p.add_literal(migraph::literal{s, x_data});
auto scale = p.add_literal(migraph::literal{vars, scale_data});
auto bias = p.add_literal(migraph::literal{vars, bias_data});
auto mean = p.add_literal(migraph::literal{vars, mean_data});
auto variance = p.add_literal(migraph::literal{vars, variance_data});
p.add_instruction(migraph::batch_norm_inference{}, x, mean, variance, scale, bias);
p.compile(migraph::gpu::target{});
migraph::program::parameter_map m;
m["output"] = migraph::gpu::to_gpu(migraph::generate_argument(p.get_parameter_shape("output")));
auto result = migraph::gpu::from_gpu(p.eval(m));
std::vector<float> result_vector(width * height * channels * batches);
std::vector<float> gold(width * height * channels * batches);
std::fill(gold.begin(), gold.end(), output_val);
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
EXPECT(test::verify_range(result_vector, gold));
}
int main()
{
verify_program<test_add>();
......@@ -338,5 +295,4 @@ int main()
verify_program<test_contiguous>();
verify_program<test_transpose>();
verify_program<test_batchnorm_inference>();
batch_norm_inference_test();
}