Unverified commit 56857acf authored by Paul Fultz II, committed by GitHub

Merge branch 'develop' into onnx_parse_tests

parents 746eee8c ba100593
@@ -21,7 +21,8 @@ argument miopen_convolution::compute(context& ctx,
     auto w_desc = make_tensor(args[1].get_shape());
     auto y_desc = make_tensor(output_shape);
-    float alpha = 1, beta = 0;
+    float alpha = 1;
+    float beta = 0;
     miopenConvolutionForward(ctx.get_stream().get_miopen(),
                              &alpha,
                              x_desc.get(),
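Editor's note on the pattern above (and the identical hunks below): MIOpen's forward entry points follow the usual alpha/beta scaling convention, roughly y = alpha * op(x) + beta * y, so alpha = 1 and beta = 0 simply request that the result overwrite the output buffer. Splitting the declarations onto separate lines is a readability/lint cleanup, not a behavior change.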
@@ -63,20 +64,22 @@ shape miopen_convolution::compile(context& ctx,
     int algo_count = 1;
     miopenConvAlgoPerf_t perf;
-    miopenFindConvolutionForwardAlgorithm(ctx.get_stream().get_miopen(),
-                                          x_desc.get(),
-                                          x.implicit(),
-                                          w_desc.get(),
-                                          w.implicit(),
-                                          cd.get(),
-                                          y_desc.get(),
-                                          y.implicit(),
-                                          1,
-                                          &algo_count,
-                                          &perf,
-                                          workspace.implicit(),
-                                          workspace_size,
-                                          false);
+    auto status = miopenFindConvolutionForwardAlgorithm(ctx.get_stream().get_miopen(),
+                                                        x_desc.get(),
+                                                        x.implicit(),
+                                                        w_desc.get(),
+                                                        w.implicit(),
+                                                        cd.get(),
+                                                        y_desc.get(),
+                                                        y.implicit(),
+                                                        1,
+                                                        &algo_count,
+                                                        &perf,
+                                                        workspace.implicit(),
+                                                        workspace_size,
+                                                        false);
+    if(status != miopenStatusSuccess)
+        MIGRAPHX_THROW("Find convolution failed");
     algo = perf.fwd_algo;
     return shape{shape::int8_type, {perf.memory}};
 }
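Editor's note: the updated call captures the returned miopenStatus_t and throws via MIGRAPHX_THROW on failure. If the same check is needed at more MIOpen call sites, it could be factored into a small helper. A minimal sketch — the helper name `check_miopen` and the errors header location are assumptions, not part of this diff:

```cpp
#include <string>
#include <miopen/miopen.h>
#include <migraphx/errors.hpp> // assumed header providing MIGRAPHX_THROW

// Hypothetical helper: throw with a message when an MIOpen call fails.
inline void check_miopen(miopenStatus_t status, const std::string& msg)
{
    if(status != miopenStatusSuccess)
        MIGRAPHX_THROW(msg + " (status " + std::to_string(static_cast<int>(status)) + ")");
}

// The call site above would then read:
//   check_miopen(miopenFindConvolutionForwardAlgorithm(...), "Find convolution failed");
```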
...
@@ -18,7 +18,8 @@ argument miopen_elu::compute(context& ctx,
                              const shape& output_shape,
                              const std::vector<argument>& args) const
 {
-    float alpha = 1, beta = 0;
+    float alpha = 1;
+    float beta = 0;
     auto x_desc = make_tensor(args[0].get_shape());
     auto y_desc = make_tensor(output_shape);
     miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -265,7 +265,8 @@ struct miopen_conv_bias
     argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
     {
         auto fargs = make_fused_args();
-        float alpha = 1, beta = 0;
+        float alpha = 1;
+        float beta = 0;
         miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
         miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
         return f.execute(ctx, fargs, args[0], args[4]);
@@ -308,7 +309,8 @@ struct miopen_conv_bias_relu
     argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
     {
         auto fargs = make_fused_args();
-        float alpha = 1, beta = 0;
+        float alpha = 1;
+        float beta = 0;
         miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
         miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
         miopenSetOpArgsActivForward(fargs.get(), relu, &alpha, &beta, 0, 0, 0);
...
@@ -16,7 +16,8 @@ std::string hip_error(int error) { return hipGetErrorString(static_cast<hipError
 std::size_t get_available_gpu_memory()
 {
-    size_t free, total;
+    size_t free;
+    size_t total;
     auto status = hipMemGetInfo(&free, &total);
     if(status != hipSuccess)
         MIGRAPHX_THROW("Failed getting available memory: " + hip_error(status));
@@ -72,13 +73,13 @@ argument allocate_gpu(const shape& s, bool host)
     return {s, [p]() mutable { return reinterpret_cast<char*>(p.get()); }};
 }

-argument to_gpu(argument arg, bool host)
+argument to_gpu(const argument& arg, bool host)
 {
     auto p = share(write_to_gpu(arg.data(), arg.get_shape().bytes(), host));
     return {arg.get_shape(), [p]() mutable { return reinterpret_cast<char*>(p.get()); }};
 }

-argument from_gpu(argument arg)
+argument from_gpu(const argument& arg)
 {
     argument result;
     arg.visit([&](auto x) {
@@ -98,7 +99,7 @@ void set_device(std::size_t id)
 void gpu_sync() { hipDeviceSynchronize(); }

-void copy_to_gpu(argument src, argument dst)
+void copy_to_gpu(const argument& src, const argument& dst)
 {
     std::size_t src_size = src.get_shape().bytes();
     std::size_t dst_size = dst.get_shape().bytes();
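Editor's note: copy_to_gpu now takes both arguments by const reference, and the sizes computed above are presumably used to validate the copy before issuing it. A minimal sketch of that shape of function, under the assumption that it bounds-checks and then calls hipMemcpy (the function name and error messages here are illustrative, not the MIGraphX source):

```cpp
#include <hip/hip_runtime_api.h>

// Sketch: size-checked copy of one argument's bytes onto the GPU.
// Relies on migraphx::argument and the hip_error helper shown above.
void copy_to_gpu_sketch(const migraphx::argument& src, const migraphx::argument& dst)
{
    std::size_t src_size = src.get_shape().bytes();
    std::size_t dst_size = dst.get_shape().bytes();
    if(src_size > dst_size)
        MIGRAPHX_THROW("Destination is too small for copy");
    auto status = hipMemcpy(dst.data(), src.data(), src_size, hipMemcpyHostToDevice);
    if(status != hipSuccess)
        MIGRAPHX_THROW("Copy to gpu failed: " + hip_error(status));
}
```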
...
@@ -9,17 +9,17 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {

-migraphx::argument allocate_gpu(const migraphx::shape& s, bool host = false);
+argument allocate_gpu(const shape& s, bool host = false);

-migraphx::argument to_gpu(migraphx::argument arg, bool host = false);
+argument to_gpu(const argument& arg, bool host = false);

-migraphx::argument from_gpu(migraphx::argument arg);
+argument from_gpu(const argument& arg);

 void set_device(std::size_t id);

 void gpu_sync();

-void copy_to_gpu(argument src, argument dst);
+void copy_to_gpu(const argument& src, const argument& dst);

 struct hip_allocate
 {
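Editor's note: besides dropping the redundant migraphx:: qualification (these declarations already sit inside namespace migraphx), the helpers now take their arguments by const reference. A typical host-to-device round trip through them would look roughly like this (a minimal sketch; the shape, values, and the (shape, pointer) argument constructor are assumptions for illustration):

```cpp
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
std::vector<float> data = {1.0f, 2.0f, 3.0f, 4.0f};
migraphx::argument host_arg{s, data.data()}; // assumes argument's (shape, pointer) constructor

auto device_arg = migraphx::gpu::to_gpu(host_arg);     // allocate + copy host -> device
auto host_copy  = migraphx::gpu::from_gpu(device_arg); // copy device -> host
```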
...
@@ -18,7 +18,8 @@ argument miopen_leaky_relu::compute(context& ctx,
                                     const shape& output_shape,
                                     const std::vector<argument>& args) const
 {
-    float alpha = 1, beta = 0;
+    float alpha = 1;
+    float beta = 0;
     auto x_desc = make_tensor(args[0].get_shape());
     auto y_desc = make_tensor(output_shape);
     miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -20,7 +20,8 @@ argument miopen_pooling::compute(context& ctx,
     auto x_desc = make_tensor(args[0].get_shape());
     auto y_desc = make_tensor(output_shape);
-    float alpha = 1, beta = 0;
+    float alpha = 1;
+    float beta = 0;
     miopenPoolingForward(ctx.get_stream().get_miopen(),
                          pd.get(),
...
@@ -18,7 +18,8 @@ argument miopen_relu::compute(context& ctx,
                               const shape& output_shape,
                               const std::vector<argument>& args) const
 {
-    float alpha = 1, beta = 0;
+    float alpha = 1;
+    float beta = 0;
     auto x_desc = make_tensor(args[0].get_shape());
     auto y_desc = make_tensor(output_shape);
     miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -18,7 +18,8 @@ argument miopen_sigmoid::compute(context& ctx,
                                  const shape& output_shape,
                                  const std::vector<argument>& args) const
 {
-    float alpha = 1, beta = 0;
+    float alpha = 1;
+    float beta = 0;
     auto x_desc = make_tensor(args[0].get_shape());
     auto y_desc = make_tensor(output_shape);
     miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -18,7 +18,8 @@ argument miopen_softmax::compute(context& ctx,
                                  const shape& output_shape,
                                  const std::vector<argument>& args) const
 {
-    float alpha = 1, beta = 0;
+    float alpha = 1;
+    float beta = 0;
     auto x_desc = make_tensor(args[0].get_shape());
     auto y_desc = make_tensor(output_shape);
     miopenSoftmaxForward(ctx.get_stream().get_miopen(),
...
@@ -18,7 +18,8 @@ argument miopen_tanh::compute(context& ctx,
                               const shape& output_shape,
                               const std::vector<argument>& args) const
 {
-    float alpha = 1, beta = 0;
+    float alpha = 1;
+    float beta = 0;
     auto x_desc = make_tensor(args[0].get_shape());
     auto y_desc = make_tensor(output_shape);
     miopenActivationForward(ctx.get_stream().get_miopen(),
...
@@ -333,9 +333,15 @@ TEST_CASE(im2col_3x3_with_padding_test)
 TEST_CASE(batch_norm_inference_test)
 {
     migraphx::program p;
-    const size_t width = 2, height = 2, channels = 4, batches = 2;
-    const float x_val = 8.0f, mean_val = 2.0f, variance_val = 4.0f, scale_val = 2.0f,
-                bias_val = 1.0f;
+    const size_t width = 2;
+    const size_t height = 2;
+    const size_t channels = 4;
+    const size_t batches = 2;
+    const float x_val = 8.0;
+    const float mean_val = 2.0;
+    const float variance_val = 4.0;
+    const float scale_val = 2.0f;
+    const float bias_val = 1.0f;
     const float output_val = scale_val * (x_val - mean_val) / (std::sqrt(variance_val)) + bias_val;

     migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
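Editor's note: plugging the constants above into the batch-norm formula gives output_val = 2 * (8 - 2) / sqrt(4) + 1 = 2 * 6 / 2 + 1 = 7, so every element of the expected output is 7.0f.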
@@ -753,37 +759,37 @@ template <class T>
 void gemm_test()
 {
     migraphx::program p;
     std::vector<T> a = {-0.00925222, 0.56250403, 0.70107397, 0.75402161, -0.505885,
                         1.33628943, -0.11413, -0.31270559, 1.59336732, -0.19361027,
                         -0.91620867, 0.40108416, -0.06969921, 0.68483471, -0.39906632,
                         -1.66423624, 0.69040076, -1.31490171, -0.11282616, -0.79391814};
-    std::vector<T> b = {6.09568541e-01,
-                        -6.10527007e-01,
-                        3.66646462e-01,
-                        1.18951101e-01,
-                        5.58777432e-01,
-                        -3.21296298e-01,
-                        -5.95997198e-01,
-                        -5.01425721e-01,
-                        -2.84606807e-01,
-                        -5.73673557e-01,
-                        -8.99430260e-01,
-                        -4.25103093e-01,
-                        1.53027987e+00,
-                        -3.81407415e-04,
-                        -3.29650255e-01};
-    std::vector<T> c = {-1.56327541e+00,
-                        -7.09570140e-01,
-                        -5.37424982e-01,
-                        -2.22994831e-01,
-                        -2.15586437e+00,
-                        2.09177941e-03,
-                        -1.47279677e+00,
-                        2.02627040e-01,
-                        -6.04527691e-01,
-                        -1.29885596e+00,
-                        2.16294914e+00,
-                        -1.48101497e-01};
+    std::vector<float> b = {6.09568541e-01,
+                            -6.10527007e-01,
+                            3.66646462e-01,
+                            1.18951101e-01,
+                            5.58777432e-01,
+                            -3.21296298e-01,
+                            -5.95997198e-01,
+                            -5.01425721e-01,
+                            -2.84606807e-01,
+                            -5.73673557e-01,
+                            -8.99430260e-01,
+                            -4.25103093e-01,
+                            1.53027987e+00,
+                            -3.81407415e-04,
+                            -3.29650255e-01};
+    std::vector<float> c = {-1.56327541e+00,
+                            -7.09570140e-01,
+                            -5.37424982e-01,
+                            -2.22994831e-01,
+                            -2.15586437e+00,
+                            2.09177941e-03,
+                            -1.47279677e+00,
+                            2.02627040e-01,
+                            -6.04527691e-01,
+                            -1.29885596e+00,
+                            2.16294914e+00,
+                            -1.48101497e-01};
     migraphx::shape a_shape{migraphx::shape::get_type<T>{}, {4, 5}};
     auto al = p.add_literal(migraphx::literal{a_shape, a});
     migraphx::shape b_shape{migraphx::shape::get_type<T>{}, {5, 3}};
@@ -793,11 +799,7 @@ void gemm_test()
     auto result = p.eval({});
     std::vector<T> results_vector(12);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
-    float tol = 1e-6;
-    for(int i = 0; i < results_vector.size(); i++)
-    {
-        EXPECT(std::abs(results_vector[i] - c[i]) < tol);
-    }
+    EXPECT(migraphx::verify_range(c, results_vector));
 }
 TEST_CASE_REGISTER(gemm_test<float>)
 TEST_CASE_REGISTER(gemm_test<double>)
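Editor's note: migraphx::verify_range replaces the hand-rolled tolerance loop with a single range comparison. Its exact implementation is not shown in this diff; an illustrative stand-in that captures the behavior the tests rely on (same length, elementwise agreement within a tolerance) might look like:

```cpp
#include <cmath>
#include <cstddef>

// Illustrative stand-in for a verify_range-style check, not the MIGraphX source.
template <class R1, class R2>
bool verify_range_sketch(const R1& expected, const R2& actual, double tol = 1e-6)
{
    if(expected.size() != actual.size())
        return false;
    for(std::size_t i = 0; i < expected.size(); i++)
        if(std::abs(double(expected[i]) - double(actual[i])) > tol)
            return false;
    return true;
}
```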
@@ -851,12 +853,7 @@ TEST_CASE(maxpool_test)
     // std::cout << result.get_shape() << std::endl;
     std::vector<float> results_vector(36);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
-    float tol = 1e-6;
-    for(int i = 0; i < results_vector.size(); i++)
-    {
-        // std::cout << results_vector[i] << " " << c[i] << std::endl;
-        EXPECT(std::abs(results_vector[i] - c[i]) < tol);
-    }
+    EXPECT(migraphx::verify_range(results_vector, c));
 }

 TEST_CASE(softmax_test)
...
@@ -30,6 +30,7 @@ struct mem_data_ptr
     using type = T C::*;
 };

+// NOLINTNEXTLINE
 #define MIGRAPHX_ROB(name, Type, C, mem)      \
     struct name##_tag : mem_data_ptr<C, Type> \
     {                                         \
...
@@ -189,7 +189,7 @@ inline auto& get_test_cases()
 inline void add_test_case(std::string name, std::function<void()> f)
 {
-    get_test_cases().emplace_back(name, f);
+    get_test_cases().emplace_back(std::move(name), std::move(f));
 }

 struct auto_register
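Editor's note: since add_test_case takes name and f by value, wrapping the final use of each in std::move turns the last copy into a move, saving one std::string and one std::function copy per registered test case.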
@@ -248,6 +248,7 @@ inline void run(int argc, const char* argv[])
 // NOLINTNEXTLINE
 #define TEST_CAT(x, ...) TEST_PRIMITIVE_CAT(x, __VA_ARGS__)
+// NOLINTNEXTLINE
 #define TEST_PRIMITIVE_CAT(x, ...) x##__VA_ARGS__

 // NOLINTNEXTLINE
...
@@ -385,7 +385,10 @@ struct match_find_sum
     migraphx::instruction_ref ins;
     auto matcher() const { return match::name("sum"); }

-    void apply(migraphx::program&, match::matcher_result r) const { EXPECT(bool{r.result == ins}); }
+    void apply(migraphx::program&, const match::matcher_result& r) const
+    {
+        EXPECT(bool{r.result == ins});
+    }
 };

 struct match_find_literal
@@ -393,7 +396,7 @@ struct match_find_literal
     migraphx::instruction_ref ins;
     auto matcher() const { return match::name("@literal"); }

-    void apply(migraphx::program&, match::matcher_result r) const
+    void apply(migraphx::program&, const match::matcher_result& r) const
     {
         EXPECT(bool{r.result != ins});
         EXPECT(r.result->name() == "@literal");
...
-#ifndef MIGRAPH_GUARD_CONCAT_OPT_HPP
-#define MIGRAPH_GUARD_CONCAT_OPT_HPP
+#ifndef MIGRAPHX_GUARD_CONCAT_OPT_HPP
+#define MIGRAPHX_GUARD_CONCAT_OPT_HPP
 #include <cassert>
 #include <string>
@@ -10,8 +10,10 @@
 #include <migraphx/operation.hpp>
 #include <migraphx/operators.hpp>
+#include <migraphx/config.hpp>

 namespace migraphx {
+inline namespace MIGRAPHX_INLINE_NS {

 struct program;
@@ -40,6 +42,7 @@ interface('concat_optimization',
 #endif

+} // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx

 #endif
-#ifndef MIGRAPH_GUARD_CONTEXT_HPP
-#define MIGRAPH_GUARD_CONTEXT_HPP
+#ifndef MIGRAPHX_GUARD_CONTEXT_HPP
+#define MIGRAPHX_GUARD_CONTEXT_HPP
 #include <cassert>
 #include <string>
@@ -7,8 +7,10 @@
 #include <memory>
 #include <type_traits>
 #include <utility>
+#include <migraphx/config.hpp>

 namespace migraphx {
+inline namespace MIGRAPHX_INLINE_NS {

 #ifdef DOXYGEN
@@ -31,6 +33,7 @@ interface('context',
 #endif

+} // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx

 #endif
-#ifndef MIGRAPH_GUARD_MIGRAPHLIB_OPERAND_HPP
-#define MIGRAPH_GUARD_MIGRAPHLIB_OPERAND_HPP
+#ifndef MIGRAPHX_GUARD_MIGRAPHLIB_OPERAND_HPP
+#define MIGRAPHX_GUARD_MIGRAPHLIB_OPERAND_HPP
 #include <cassert>
 #include <string>
@@ -13,8 +13,10 @@
 #include <migraphx/argument.hpp>
 #include <migraphx/context.hpp>
 #include <migraphx/auto_any_cast.hpp>
+#include <migraphx/config.hpp>

 namespace migraphx {
+inline namespace MIGRAPHX_INLINE_NS {

 #ifdef DOXYGEN
@@ -101,7 +103,7 @@ template <class T>
 argument compute_op(rank<0>, const T& x, context&, const shape&, const std::vector<argument>&)
 {
     std::string name = x.name();
-    MIGRAPH_THROW("Not computable: " + name);
+    MIGRAPHX_THROW("Not computable: " + name);
 }

 template <class T>
@@ -165,6 +167,7 @@ int output_alias_op(const T& x, const std::vector<shape>& shapes)
 #endif

+} // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx

 #endif
-#ifndef MIGRAPH_GUARD_PASS_HPP
-#define MIGRAPH_GUARD_PASS_HPP
+#ifndef MIGRAPHX_GUARD_PASS_HPP
+#define MIGRAPHX_GUARD_PASS_HPP
 #include <cassert>
 #include <string>
@@ -7,8 +7,10 @@
 #include <memory>
 #include <type_traits>
 #include <utility>
+#include <migraphx/config.hpp>

 namespace migraphx {
+inline namespace MIGRAPHX_INLINE_NS {

 struct program;
@@ -35,6 +37,7 @@ interface('pass',
 #endif

+} // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx

 #endif
-#ifndef MIGRAPH_GUARD_MIGRAPHLIB_TARGET_HPP
-#define MIGRAPH_GUARD_MIGRAPHLIB_TARGET_HPP
+#ifndef MIGRAPHX_GUARD_MIGRAPHLIB_TARGET_HPP
+#define MIGRAPHX_GUARD_MIGRAPHLIB_TARGET_HPP
 #include <cassert>
 #include <string>
@@ -10,8 +10,10 @@
 #include <vector>
 #include <migraphx/context.hpp>
 #include <migraphx/pass.hpp>
+#include <migraphx/config.hpp>

 namespace migraphx {
+inline namespace MIGRAPHX_INLINE_NS {

 #ifdef DOXYGEN
@@ -48,6 +50,7 @@ interface('target',
 #endif

+} // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx

 #endif