Commit 8a3d1d09 authored by Paul

Merge branch 'develop' into py

parents fc8c2664 6972ad26
@@ -23,6 +23,8 @@ using fusion_plan_descriptor = MIGRAPHX_MANAGE_PTR(miopenFusionPlanDescriptor_t,
                                                   miopenDestroyFusionPlan);
using fused_operator_args = MIGRAPHX_MANAGE_PTR(miopenOperatorArgs_t, miopenDestroyOperatorArgs);
using lrn_descriptor = MIGRAPHX_MANAGE_PTR(miopenLRNDescriptor_t, miopenDestroyLRNDescriptor);

template <class Result, class F, class... Ts>
Result make_obj(F f, Ts... xs)
{
@@ -89,6 +91,13 @@ inline pooling_descriptor make_pooling(const migraphx::op::pooling& op)
    return p;
}

inline lrn_descriptor make_lrn(const migraphx::op::lrn& op)
{
    auto ldesc = make_obj<lrn_descriptor>(&miopenCreateLRNDescriptor);
    miopenSetLRNDescriptor(ldesc.get(), miopenLRNCrossChannel, op.size, op.alpha, op.beta, op.bias);
    return ldesc;
}

inline activation_descriptor make_relu()
{
    auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
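The make_lrn helper above is the single place where the MIOpen LRN descriptor gets configured, so the argument mapping is worth spelling out. A minimal sketch, assuming op::lrn carries its attributes in the order (alpha, beta, bias, size) implied by the lrn_test further down:

    // Illustrative only: how the op fields land in MIOpen's setter,
    // miopenSetLRNDescriptor(desc, mode, lrnN, lrnAlpha, lrnBeta, lrnK).
    migraphx::op::lrn op{0.0001, 0.75, 1, 5}; // alpha, beta, bias, size
    auto ldesc = make_lrn(op);
    // -> mode = miopenLRNCrossChannel, lrnN = 5 (window size),
    //    lrnAlpha = 0.0001, lrnBeta = 0.75, lrnK = 1 (bias)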
#ifndef MIGRAPHX_GUARD_RTGLIB_SUB_HPP
#define MIGRAPHX_GUARD_RTGLIB_SUB_HPP
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/oper.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/sub.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/config.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
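// Element-wise subtraction on the GPU. The empty body below is intentional:
// the binary_device CRTP base (from <migraphx/gpu/oper.hpp>) supplies the
// operator interface, and hip_sub only names the device kernel, device::sub.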
struct hip_sub : binary_device<hip_sub, device::sub>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
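hip_sub only wires the existing op::sub semantics into the GPU lowering; the element-wise behaviour it must reproduce can be checked against the CPU reference target. An illustrative fragment, in the style of the tests further down:

    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {3}};
    auto a = p.add_literal(migraphx::literal{s, {3.0f, 2.0f, 1.0f}});
    auto b = p.add_literal(migraphx::literal{s, {1.0f, 1.0f, 1.0f}});
    p.add_instruction(migraphx::op::sub{}, a, b);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({}); // element-wise a - b -> {2.0f, 1.0f, 0.0f}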
@@ -22,6 +22,7 @@
#include <migraphx/gpu/elu.hpp>
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/add.hpp>
#include <migraphx/gpu/sub.hpp>
#include <migraphx/gpu/exp.hpp>
#include <migraphx/gpu/log.hpp>
#include <migraphx/gpu/sin.hpp>

@@ -42,6 +43,7 @@
#include <migraphx/gpu/concat.hpp>
#include <migraphx/gpu/pad.hpp>
#include <migraphx/gpu/gather.hpp>
#include <migraphx/gpu/lrn.hpp>
#include <utility>
#include <functional>
#include <algorithm>

@@ -55,6 +57,7 @@ struct miopen_apply
    program* prog = nullptr;
    context ctx{};
    std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
    instruction_ref last{};

    void check_shape(shape x, instruction_ref i)
    {
@@ -65,6 +68,7 @@ struct miopen_apply
    void init()
    {
        this->last = instruction::get_output_alias(std::prev(prog->end()));
        add_miopen_simple_op<miopen_relu>("relu", make_relu);
        add_miopen_simple_op<miopen_sigmoid>("sigmoid", make_sigmoid);
        add_miopen_simple_op<miopen_abs>("abs", make_abs);

@@ -74,6 +78,7 @@ struct miopen_apply
        add_miopen_extend_op<miopen_elu, op::elu>("elu", make_elu);
        add_generic_op<hip_add>("add");
        add_generic_op<hip_sub>("sub");
        add_generic_op<hip_exp>("exp");
        add_generic_op<hip_log>("log");
        add_generic_op<hip_sin>("sin");

@@ -95,6 +100,7 @@ struct miopen_apply
        add_extend_op<hip_gather, op::gather>("gather");
        add_extend_op<hip_pad, op::pad>("pad");
        add_lrn_op();
        add_convolution_op();
        add_pooling_op();
        add_batch_norm_inference_op();
@@ -115,7 +121,7 @@ struct miopen_apply
    instruction_ref insert_allocation(instruction_ref ins, const shape& s, std::string tag = "")
    {
-       if(ins == --prog->end() and tag.empty())
+       if(ins == last and tag.empty())
        {
            return prog->add_parameter("output", s);
        }
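This hunk changes which instruction receives the "output" parameter: it is now the output alias of the final instruction (the `last` member set up in init()) rather than literally the last instruction. A hypothetical sketch of the case this covers, assuming get_output_alias follows aliasing ops such as reshape:

    // If the program ends in an instruction that merely aliases its input:
    //   @4 = gpu::add(...)
    //   @5 = reshape(@4)   // output aliases @4's buffer
    // then last == @4, and the allocation inserted for @4 is the one that
    // must become the "output" parameter.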
@@ -155,6 +161,17 @@ struct miopen_apply
        });
    }

    void add_lrn_op()
    {
        apply_map.emplace("lrn", [=](instruction_ref ins) {
            auto&& op   = any_cast<op::lrn>(ins->get_operator());
            auto ldesc  = make_lrn(op);
            auto output = insert_allocation(ins, ins->get_shape());
            return prog->replace_instruction(
                ins, miopen_lrn{std::move(ldesc)}, ins->inputs().at(0), output);
        });
    }

    template <class T>
    void add_generic_op(std::string name)
    {
#include <migraphx/gpu/lrn.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape miopen_lrn::compute_shape(const std::vector<shape>& inputs) const
{
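    // two "inputs" arrive here: the real input tensor plus the output buffer
    // appended by insert_allocation during lowering; the result takes the
    // shape of that buffer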
    check_shapes{inputs, *this}.has(2).not_broadcasted();
    return inputs.at(1);
}
argument miopen_lrn::compute(context& ctx,
                             const shape& output_shape,
                             const std::vector<argument>& args) const
{
    float alpha = 1;
    float beta  = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
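    // alpha/beta are MIOpen's usual blend factors (y = alpha * op(x) + beta * y);
    // the trailing `false, nullptr` requests forward-inference only, so no
    // workspace is required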
    miopenLRNForward(ctx.get_stream().get_miopen(),
                     ldesc.get(),
                     &alpha,
                     x_desc.get(),
                     args[0].implicit(),
                     &beta,
                     y_desc.get(),
                     args[1].implicit(),
                     false,
                     nullptr);
    return args[1];
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
@@ -15,6 +15,7 @@
#include <migraphx/eliminate_contiguous.hpp>
#include <migraphx/common_subexpression_elimination.hpp>
#include <migraphx/fwd_conv_batchnorm_rewrite.hpp>
#include <migraphx/rewrite_rnn.hpp>
#include <migraphx/eliminate_concat.hpp>
#include <migraphx/gpu/concat_gpu_opt.hpp>

@@ -31,14 +32,16 @@ std::vector<pass> target::get_passes(migraphx::context& gctx) const
        dead_code_elimination{},
        fwd_conv_batchnorm_rewrite{},
        dead_code_elimination{},
-       common_subexpression_elimination{},
+       rewrite_rnn{},
        dead_code_elimination{},
        // common_subexpression_elimination{},
        // dead_code_elimination{},
        simplify_algebra{},
        dead_code_elimination{},
        constant_propagate{},
        dead_code_elimination{},
        auto_contiguous{},
-       simplify_reshapes{},
+       // simplify_reshapes{},
        dead_code_elimination{},
        lowering{ctx},
        eliminate_concat{concat_gpu_optimization{}},
@@ -5,6 +5,7 @@
#include <migraphx/instruction.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/onnx.hpp>
#include "test.hpp" #include "test.hpp"
float sigmoid(float x) { return 1 / (1 + expf(-x)); } float sigmoid(float x) { return 1 / (1 + expf(-x)); }
...@@ -731,6 +732,20 @@ TEST_CASE(leaky_relu_test) ...@@ -731,6 +732,20 @@ TEST_CASE(leaky_relu_test)
EXPECT(migraphx::verify_range(results_vector, gold)); EXPECT(migraphx::verify_range(results_vector, gold));
} }
TEST_CASE(lrn_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {1, 5, 1, 1}};
    auto l = p.add_literal(migraphx::literal{s, {-2.0f, 1.0f, 0.f, 1.0f, 2.0f}});
    p.add_instruction(migraphx::op::lrn{0.0001, 0.75, 1, 5}, l);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector(5);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {-2 / 1.000075, 1 / 1.00009, 0 / 1.000145, 1 / 1.00009, 2 / 1.000075};
    EXPECT(migraphx::verify_range(results_vector, gold));
}
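The gold values can be verified by hand: with alpha = 0.0001, beta = 0.75, bias = 1 and size = 5, each channel is divided by (bias + alpha/size * sum of squares over its clipped window)^beta. For the first channel, whose window is clipped to channels 0..2 (std::pow from <cmath>):

    // channel 0 window: {-2, 1, 0} -> sum of squares = 5
    double scale0 = std::pow(1.0 + (0.0001 / 5) * 5, 0.75); // ~= 1.000075
    double y0     = -2 / scale0;                            // first gold value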
TEST_CASE(imagescaler_test)
{
    migraphx::program p;
#include <migraphx/dead_code_elimination.hpp>
#include <basic_ops.hpp>
#include <migraphx/operators.hpp>
#include <test.hpp>

struct dce_target
@@ -111,4 +112,21 @@ TEST_CASE(depth_test)
    EXPECT(result != migraphx::literal{4});
}

TEST_CASE(undefined_test)
{
    migraphx::program p;
    auto one   = p.add_literal(1);
    auto two   = p.add_literal(2);
    auto undef = p.add_instruction(migraphx::op::undefined{});
    p.add_instruction(sum_op{}, one, two);
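    // only `undef` is dead code: both literals feed sum_op, so dead-code
    // elimination should remove exactly one instruction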
    auto count = std::distance(p.begin(), p.end());
    p.compile(dce_target{});
    EXPECT(std::distance(p.begin(), p.end()) == count - 1);
    EXPECT(not p.has_instruction(undef));
    auto result = p.eval({});
    EXPECT(result == migraphx::literal{3});
    EXPECT(result != migraphx::literal{4});
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
(Binary file added: a small ONNX test model named "subtraction2" containing a single Sub node that takes inputs "0" and "1" and produces output "out".)