Commit 14a2464b authored by Paul's avatar Paul
Browse files

Upgrade to hcc 2.0

parent 3040b5e6
...@@ -18,7 +18,8 @@ argument miopen_sigmoid::compute(context& ctx, ...@@ -18,7 +18,8 @@ argument miopen_sigmoid::compute(context& ctx,
const shape& output_shape, const shape& output_shape,
const std::vector<argument>& args) const const std::vector<argument>& args) const
{ {
float alpha = 1, beta = 0; float alpha = 1;
float beta = 0;
auto x_desc = make_tensor(args[0].get_shape()); auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape); auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(), miopenActivationForward(ctx.get_stream().get_miopen(),
......
...@@ -18,7 +18,8 @@ argument miopen_softmax::compute(context& ctx, ...@@ -18,7 +18,8 @@ argument miopen_softmax::compute(context& ctx,
const shape& output_shape, const shape& output_shape,
const std::vector<argument>& args) const const std::vector<argument>& args) const
{ {
float alpha = 1, beta = 0; float alpha = 1;
float beta = 0;
auto x_desc = make_tensor(args[0].get_shape()); auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape); auto y_desc = make_tensor(output_shape);
miopenSoftmaxForward(ctx.get_stream().get_miopen(), miopenSoftmaxForward(ctx.get_stream().get_miopen(),
......
...@@ -18,7 +18,8 @@ argument miopen_tanh::compute(context& ctx, ...@@ -18,7 +18,8 @@ argument miopen_tanh::compute(context& ctx,
const shape& output_shape, const shape& output_shape,
const std::vector<argument>& args) const const std::vector<argument>& args) const
{ {
float alpha = 1, beta = 0; float alpha = 1;
float beta = 0;
auto x_desc = make_tensor(args[0].get_shape()); auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape); auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(), miopenActivationForward(ctx.get_stream().get_miopen(),
......
...@@ -333,9 +333,15 @@ TEST_CASE(im2col_3x3_with_padding_test) ...@@ -333,9 +333,15 @@ TEST_CASE(im2col_3x3_with_padding_test)
TEST_CASE(batch_norm_inference_test) TEST_CASE(batch_norm_inference_test)
{ {
migraphx::program p; migraphx::program p;
const size_t width = 2, height = 2, channels = 4, batches = 2; const size_t width = 2;
const float x_val = 8.0f, mean_val = 2.0f, variance_val = 4.0f, scale_val = 2.0f, const size_t height = 2;
bias_val = 1.0f; const size_t channels = 4;
const size_t batches = 2;
const float x_val = 8.0;
const float mean_val = 2.0;
const float variance_val = 4.0;
const float scale_val = 2.0f;
const float bias_val = 1.0f;
const float output_val = scale_val * (x_val - mean_val) / (std::sqrt(variance_val)) + bias_val; const float output_val = scale_val * (x_val - mean_val) / (std::sqrt(variance_val)) + bias_val;
migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}}; migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
...@@ -793,11 +799,7 @@ void gemm_test() ...@@ -793,11 +799,7 @@ void gemm_test()
auto result = p.eval({}); auto result = p.eval({});
std::vector<T> results_vector(12); std::vector<T> results_vector(12);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
float tol = 1e-6; EXPECT(migraphx::verify_range(results_vector, c));
for(int i = 0; i < results_vector.size(); i++)
{
EXPECT(std::abs(results_vector[i] - c[i]) < tol);
}
} }
TEST_CASE_REGISTER(gemm_test<float>) TEST_CASE_REGISTER(gemm_test<float>)
TEST_CASE_REGISTER(gemm_test<double>) TEST_CASE_REGISTER(gemm_test<double>)
...@@ -851,12 +853,7 @@ TEST_CASE(maxpool_test) ...@@ -851,12 +853,7 @@ TEST_CASE(maxpool_test)
// std::cout << result.get_shape() << std::endl; // std::cout << result.get_shape() << std::endl;
std::vector<float> results_vector(36); std::vector<float> results_vector(36);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
float tol = 1e-6; EXPECT(migraphx::verify_range(results_vector, c));
for(int i = 0; i < results_vector.size(); i++)
{
// std::cout << results_vector[i] << " " << c[i] << std::endl;
EXPECT(std::abs(results_vector[i] - c[i]) < tol);
}
} }
TEST_CASE(softmax_test) TEST_CASE(softmax_test)
......
...@@ -189,7 +189,7 @@ inline auto& get_test_cases() ...@@ -189,7 +189,7 @@ inline auto& get_test_cases()
inline void add_test_case(std::string name, std::function<void()> f) inline void add_test_case(std::string name, std::function<void()> f)
{ {
get_test_cases().emplace_back(name, f); get_test_cases().emplace_back(std::move(name), std::move(f));
} }
struct auto_register struct auto_register
......
...@@ -385,7 +385,7 @@ struct match_find_sum ...@@ -385,7 +385,7 @@ struct match_find_sum
migraphx::instruction_ref ins; migraphx::instruction_ref ins;
auto matcher() const { return match::name("sum"); } auto matcher() const { return match::name("sum"); }
void apply(migraphx::program&, match::matcher_result r) const { EXPECT(bool{r.result == ins}); } void apply(migraphx::program&, const match::matcher_result& r) const { EXPECT(bool{r.result == ins}); }
}; };
struct match_find_literal struct match_find_literal
...@@ -393,7 +393,7 @@ struct match_find_literal ...@@ -393,7 +393,7 @@ struct match_find_literal
migraphx::instruction_ref ins; migraphx::instruction_ref ins;
auto matcher() const { return match::name("@literal"); } auto matcher() const { return match::name("@literal"); }
void apply(migraphx::program&, match::matcher_result r) const void apply(migraphx::program&, const match::matcher_result& r) const
{ {
EXPECT(bool{r.result != ins}); EXPECT(bool{r.result != ins});
EXPECT(r.result->name() == "@literal"); EXPECT(r.result->name() == "@literal");
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment