Commit 3ece2168 authored by Scott Thornton

Merge branch 'master' into resnet18_demo

parents bc2146b0 32ba4d9a
@@ -25,7 +25,7 @@ def rocmtestnode(variant, name, body) {
        }
    }
-   withDockerContainer(image: image, args: '--device=/dev/kfd --device=/dev/dri --group-add video --cap-add SYS_PTRACE') {
+   withDockerContainer(image: image, args: '--device=/dev/kfd --device=/dev/dri --group-add video --cap-add SYS_PTRACE --add-host="bzip2.org:46.235.226.80" --add-host="www.bzip2.org:46.235.226.80"') {
        timeout(time: 1, unit: 'HOURS') {
            body(cmake_build)
        }
    }
...
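Side note (not stated in the commit itself): the new --add-host arguments simply pin /etc/hosts entries for bzip2.org and www.bzip2.org inside the build container, presumably to work around DNS failures when the CI build fetches the bzip2 sources.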
@@ -99,6 +99,8 @@ struct instruction
        });
    }

    shape get_shape() const { return result; }
    friend bool operator==(instruction_ref ref, const instruction& i) { return i == ref; }
    friend bool operator!=(const instruction& i, instruction_ref ref) { return !(i == ref); }
...
@@ -15,6 +15,49 @@
namespace migraph {
namespace gpu {
struct miopen_batch_norm_inference
{
    batch_norm_inference op;

    std::string name() const { return "gpu::batch_norm_inference"; }

    shape compute_shape(std::vector<shape> inputs) const
    {
        // The GPU op carries an extra output-allocation argument, so drop the
        // last input before delegating to the reference op's shape computation.
        check_shapes{inputs, *this}.has(6);
        return op.compute_shape(
            {inputs.at(0), inputs.at(1), inputs.at(2), inputs.at(3), inputs.at(4)});
    }

    argument compute(context& ctx, shape output_shape, std::vector<argument> args) const
    {
        // Argument order (as wired up by apply_batch_norm_inference below):
        // args[0]=x, args[1]=mean, args[2]=variance, args[3]=scale, args[4]=bias,
        // args[5]=pre-allocated output buffer.
        auto x_desc  = make_tensor(args[0].get_shape());
        auto y_desc  = make_tensor(output_shape);
        auto bn_desc = make_tensor(args[3].get_shape());

        float alpha = 1.0f, beta = 0.0f;

        // TODO: adityaatluri
        // create bn-scale-bias-mean-variance descriptor for
        // miopen call
        miopenBatchNormalizationForwardInference(ctx.handle.get(),
                                                 miopenBatchNormMode_t(op.bn_mode),
                                                 &alpha,
                                                 &beta,
                                                 x_desc.get(),
                                                 args[0].implicit(),
                                                 y_desc.get(),
                                                 args[5].implicit(),
                                                 bn_desc.get(),
                                                 args[3].implicit(),
                                                 args[4].implicit(),
                                                 args[1].implicit(),
                                                 args[2].implicit(),
                                                 op.epsilon);
        return args[5];
    }
};
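For reference, the MIOpen call above should reduce, per channel c of NCHW data, to the usual inference-time formula y = scale[c] * (x - mean[c]) / sqrt(variance[c] + epsilon) + bias[c]. A minimal CPU sketch of that formula (reference only, not part of this commit; the helper name and default epsilon are illustrative assumptions) could look like:

#include <cmath>
#include <cstddef>
#include <vector>

// Reference-only sketch: spatial batch-norm inference over NCHW data, with one
// scale/bias/mean/variance value per channel.
inline std::vector<float> batch_norm_inference_reference(const std::vector<float>& x,
                                                         const std::vector<float>& scale,
                                                         const std::vector<float>& bias,
                                                         const std::vector<float>& mean,
                                                         const std::vector<float>& variance,
                                                         std::size_t batches,
                                                         std::size_t channels,
                                                         std::size_t height,
                                                         std::size_t width,
                                                         float epsilon = 1.0e-5f)
{
    std::vector<float> y(x.size());
    const std::size_t plane = height * width;
    for(std::size_t n = 0; n < batches; n++)
        for(std::size_t c = 0; c < channels; c++)
            for(std::size_t i = 0; i < plane; i++)
            {
                const std::size_t idx = (n * channels + c) * plane + i;
                y[idx] = scale[c] * (x[idx] - mean[c]) / std::sqrt(variance[c] + epsilon) +
                         bias[c];
            }
    return y;
}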
struct miopen_convolution
{
    convolution op;
...
@@ -261,6 +304,12 @@ struct miopen_apply
            {
                apply_contiguous(it);
            }
            // TODO: adityaatluri
            // tagging to easily find where code changed
            else if(it->op.name() == "batch_norm_inference")
            {
                apply_batch_norm_inference(it);
            }
        }
    }
...
@@ -334,6 +383,32 @@ struct miopen_apply
        auto output = insert_allocation(ins, ins->result);
        prog->replace_instruction(ins, miopen_contiguous{op}, ins->arguments.at(0), output);
    }
    // TODO: adityaatluri
    // Not sure how to write this. Review and fix required
    void apply_batch_norm_inference(instruction_ref ins)
    {
        auto&& op   = any_cast<batch_norm_inference>(ins->op);
        auto output = insert_allocation(ins, ins->result);

        // The per-channel scale, bias, mean and variance arguments arrive with a
        // 1-D {C} shape; lift each one to {1, C, 1, 1} so the 4-D tensor descriptor
        // built in miopen_batch_norm_inference::compute matches what MIOpen expects
        // for spatial batch norm.
        shape old_shape = ins->arguments.at(1)->get_shape();
        std::vector<int64_t> new_shape{1, static_cast<int64_t>(old_shape.elements()), 1, 1};
        auto arg1 =
            prog->insert_instruction(ins, migraph::reshape{new_shape}, ins->arguments.at(1));
        auto arg2 =
            prog->insert_instruction(ins, migraph::reshape{new_shape}, ins->arguments.at(2));
        auto arg3 =
            prog->insert_instruction(ins, migraph::reshape{new_shape}, ins->arguments.at(3));
        auto arg4 =
            prog->insert_instruction(ins, migraph::reshape{new_shape}, ins->arguments.at(4));
        prog->replace_instruction(ins,
                                  miopen_batch_norm_inference{op},
                                  ins->arguments.at(0),
                                  arg1,
                                  arg2,
                                  arg3,
                                  arg4,
                                  output);
    }
};

void lowering::apply(program& p) const { miopen_apply{&p}.apply(); }
...
@@ -219,6 +219,72 @@ struct test_transpose
    }
};
struct test_batchnorm_inference
{
    const size_t width    = 3;
    const size_t height   = 3;
    const size_t channels = 3;
    const size_t batches  = 4;

    migraph::program create_program() const
    {
        migraph::program p;
        migraph::shape s{migraph::shape::float_type, {batches, channels, height, width}};
        migraph::shape vars{migraph::shape::float_type, {channels}};
        auto x        = p.add_parameter("x", s);
        auto mean     = p.add_parameter("mean", vars);
        auto variance = p.add_parameter("variance", vars);
        auto scale    = p.add_parameter("scale", vars);
        auto bias     = p.add_parameter("bias", vars);
        p.add_instruction(migraph::batch_norm_inference{}, x, mean, variance, scale, bias);
        return p;
    }
};

void batch_norm_inference_test()
{
    migraph::program p;
    const size_t width = 2, height = 2, channels = 4, batches = 2;
    const float x_val = 8.0f, mean_val = 2.0f, variance_val = 4.0f, scale_val = 2.0f,
                bias_val = 1.0f;
    const float output_val = scale_val * (x_val - mean_val) / (std::sqrt(variance_val)) + bias_val;

    migraph::shape s{migraph::shape::float_type, {batches, channels, height, width}};
    migraph::shape vars{migraph::shape::float_type, {channels}};
    std::vector<float> x_data(width * height * channels * batches);
    std::vector<float> scale_data(channels);
    std::vector<float> bias_data(channels);
    std::vector<float> mean_data(channels);
    std::vector<float> variance_data(channels);
    std::fill(x_data.begin(), x_data.end(), x_val);
    std::fill(mean_data.begin(), mean_data.end(), mean_val);
    std::fill(variance_data.begin(), variance_data.end(), variance_val);
    std::fill(scale_data.begin(), scale_data.end(), scale_val);
    std::fill(bias_data.begin(), bias_data.end(), bias_val);

    auto x        = p.add_literal(migraph::literal{s, x_data});
    auto scale    = p.add_literal(migraph::literal{vars, scale_data});
    auto bias     = p.add_literal(migraph::literal{vars, bias_data});
    auto mean     = p.add_literal(migraph::literal{vars, mean_data});
    auto variance = p.add_literal(migraph::literal{vars, variance_data});
    p.add_instruction(migraph::batch_norm_inference{}, x, mean, variance, scale, bias);
    p.compile(migraph::gpu::target{});

    migraph::program::parameter_map m;
    m["output"] = migraph::gpu::to_gpu(migraph::generate_argument(p.get_parameter_shape("output")));
    auto result = migraph::gpu::from_gpu(p.eval(m));

    std::vector<float> result_vector(width * height * channels * batches);
    std::vector<float> gold(width * height * channels * batches);
    std::fill(gold.begin(), gold.end(), output_val);
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    EXPECT(test::verify_range(result_vector, gold));
}
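With the constants chosen above, every element of the gold output works out to scale_val * (x_val - mean_val) / sqrt(variance_val) + bias_val = 2 * (8 - 2) / 2 + 1 = 7; the small epsilon the GPU kernel adds under the square root is presumably close enough to zero to stay within the tolerance used by test::verify_range.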
int main()
{
    verify_program<test_add>();
...
@@ -232,4 +298,6 @@ int main()
    verify_program<test_gemm_transposeab>();
    verify_program<test_contiguous>();
    verify_program<test_transpose>();
    verify_program<test_batchnorm_inference>();
    batch_norm_inference_test();
}