Commit 225d4c1b authored by Paul

Formatting

parent 14a2464b
@@ -18,7 +18,7 @@ const std::string& get_type_name()
    name = typeid(PrivateMigraphTypeNameProbe).name();
    name = name.substr(7);
#else
-   const char parameter_name[] = "PrivateMigraphTypeNameProbe =";// NOLINT
+   const char parameter_name[] = "PrivateMigraphTypeNameProbe ="; // NOLINT
    name = __PRETTY_FUNCTION__;
...
@@ -36,8 +36,8 @@ read_mnist_images(const std::string& full_path, int& number_of_images, int& imag
    if(file.is_open())
    {
        int magic_number = 0;
        int n_rows = 0;
        int n_cols = 0;
        file.read(reinterpret_cast<char*>(&magic_number), sizeof(magic_number));
        magic_number = reverse_int(magic_number);
...
@@ -19,7 +19,7 @@ argument miopen_abs::compute(context& ctx,
                             const std::vector<argument>& args) const
{
    float alpha = 1;
    float beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -23,7 +23,7 @@ argument miopen_batch_norm_inference::compute(context& ctx,
    auto bn_desc = make_tensor(args[3].get_shape());
    float alpha = 1.0;
    float beta = 0.0f;
    miopenBatchNormalizationForwardInference(ctx.get_stream().get_miopen(),
                                             miopenBatchNormMode_t(op.bn_mode),
...
@@ -22,7 +22,7 @@ argument miopen_convolution::compute(context& ctx,
    auto y_desc = make_tensor(output_shape);
    float alpha = 1;
    float beta = 0;
    miopenConvolutionForward(ctx.get_stream().get_miopen(),
                             &alpha,
                             x_desc.get(),
...
@@ -19,7 +19,7 @@ argument miopen_elu::compute(context& ctx,
                             const std::vector<argument>& args) const
{
    float alpha = 1;
    float beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -266,7 +266,7 @@ struct miopen_conv_bias
    {
        auto fargs = make_fused_args();
        float alpha = 1;
        float beta = 0;
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        return f.execute(ctx, fargs, args[0], args[4]);
@@ -310,7 +310,7 @@ struct miopen_conv_bias_relu
    {
        auto fargs = make_fused_args();
        float alpha = 1;
        float beta = 0;
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        miopenSetOpArgsActivForward(fargs.get(), relu, &alpha, &beta, 0, 0, 0);
...
@@ -19,7 +19,7 @@ argument miopen_leaky_relu::compute(context& ctx,
                                    const std::vector<argument>& args) const
{
    float alpha = 1;
    float beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -21,7 +21,7 @@ argument miopen_pooling::compute(context& ctx,
    auto y_desc = make_tensor(output_shape);
    float alpha = 1;
    float beta = 0;
    miopenPoolingForward(ctx.get_stream().get_miopen(),
                         pd.get(),
...
@@ -19,7 +19,7 @@ argument miopen_relu::compute(context& ctx,
                              const std::vector<argument>& args) const
{
    float alpha = 1;
    float beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -19,7 +19,7 @@ argument miopen_sigmoid::compute(context& ctx,
                                 const std::vector<argument>& args) const
{
    float alpha = 1;
    float beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -19,7 +19,7 @@ argument miopen_softmax::compute(context& ctx,
                                 const std::vector<argument>& args) const
{
    float alpha = 1;
    float beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenSoftmaxForward(ctx.get_stream().get_miopen(),
...
@@ -19,7 +19,7 @@ argument miopen_tanh::compute(context& ctx,
                              const std::vector<argument>& args) const
{
    float alpha = 1;
    float beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -333,15 +333,15 @@ TEST_CASE(im2col_3x3_with_padding_test)
TEST_CASE(batch_norm_inference_test)
{
    migraphx::program p;
    const size_t width = 2;
    const size_t height = 2;
    const size_t channels = 4;
    const size_t batches = 2;
    const float x_val = 8.0;
    const float mean_val = 2.0;
    const float variance_val = 4.0;
    const float scale_val = 2.0f;
    const float bias_val = 1.0f;
    const float output_val = scale_val * (x_val - mean_val) / (std::sqrt(variance_val)) + bias_val;
    migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
...
@@ -385,7 +385,10 @@ struct match_find_sum
    migraphx::instruction_ref ins;
    auto matcher() const { return match::name("sum"); }
-   void apply(migraphx::program&, const match::matcher_result& r) const { EXPECT(bool{r.result == ins}); }
+   void apply(migraphx::program&, const match::matcher_result& r) const
+   {
+       EXPECT(bool{r.result == ins});
+   }
};
struct match_find_literal
...