Commit e861793a authored by Khalique's avatar Khalique
Browse files

Added ONNX parsing support for the LRN operator (and renamed LRN to lrn throughout).

parent a79ab4d7
......@@ -51,13 +51,13 @@ struct batch_norm_inference
}
};
struct LRN
struct lrn
{
float alpha = 0.0001;
float beta = 0.75;
float bias = 1.0;
int size;
std::string name() const { return "LRN"; }
std::string name() const { return "lrn"; }
shape compute_shape(std::vector<shape> inputs) const
{
......@@ -65,7 +65,7 @@ struct LRN
return inputs.front();
}
friend std::ostream& operator<<(std::ostream& os, const LRN& op)
friend std::ostream& operator<<(std::ostream& os, const lrn& op)
{
os << op.name() << ":" << op.alpha << ":" << op.beta << ":" << op.bias << ":" << op.size;
return os;
......
......@@ -59,6 +59,7 @@ struct onnx_parser
// disable dropout for inference
add_generic_op("Dropout", op::identity{});
add_mem_op("LRN", &onnx_parser::parse_lrn);
add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
add_mem_op("Constant", &onnx_parser::parse_constant);
......@@ -336,6 +337,26 @@ struct onnx_parser
return prog.add_instruction(op, args.front());
}
// Parse an ONNX "LRN" node into a migraph op::lrn instruction.
// Optional attributes alpha/beta/bias/size override the defaults below;
// the instruction is applied to the node's single input.
instruction_ref parse_lrn(const std::string&,
                          attribute_map attributes,
                          std::vector<instruction_ref> args)
{
    // Fetch an optional float attribute, falling back to the given default.
    auto float_attr = [&](const std::string& key, float fallback) {
        return contains(attributes, key) ? parse_value(attributes.at(key)).at<float>()
                                         : fallback;
    };
    const float alpha = float_attr("alpha", 0.0001);
    const float beta  = float_attr("beta", 0.75);
    const float bias  = float_attr("bias", 1.0);
    // "size" is an int attribute; default to 1 when absent.
    int size = 1;
    if(contains(attributes, "size"))
        size = parse_value(attributes.at("size")).at<int>();
    return prog.add_instruction(op::lrn{alpha, beta, bias, size}, args.front());
}
instruction_ref parse_imagescaler(const std::string&,
attribute_map attributes,
std::vector<instruction_ref> args)
......
......@@ -94,11 +94,11 @@ struct cpu_batch_norm_inference
}
};
struct cpu_LRN
struct cpu_lrn
{
op::LRN op;
op::lrn op;
std::string name() const { return "cpu::LRN"; }
std::string name() const { return "cpu::lrn"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
......@@ -633,7 +633,7 @@ struct cpu_apply
apply_map["dot"] = extend_op<cpu_gemm, op::dot>();
apply_map["batch_norm_inference"] =
extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
apply_map["LRN"] = extend_op<cpu_LRN, op::LRN>();
apply_map["lrn"] = extend_op<cpu_lrn, op::lrn>();
apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
apply_map["concat"] = extend_op<cpu_concat, op::concat>();
apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
......
......@@ -42,7 +42,7 @@ add_library(migraph_gpu
batchnorm.cpp
write_literals.cpp
rocblas.cpp
LRN.cpp
lrn.cpp
)
set_target_properties(migraph_gpu PROPERTIES EXPORT_NAME gpu)
rocm_clang_tidy_check(migraph_gpu)
......
......@@ -22,10 +22,10 @@ namespace migraph {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
struct miopen_LRN
struct miopen_lrn
{
shared<LRN_descriptor> ldesc;
std::string name() const { return "gpu::LRN"; }
shared<lrn_descriptor> ldesc;
std::string name() const { return "gpu::lrn"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
......
......@@ -22,7 +22,7 @@ using activation_descriptor = MIGRAPH_MANAGE_PTR(miopenActivationDescriptor_t,
using fusion_plan_descriptor = MIGRAPH_MANAGE_PTR(miopenFusionPlanDescriptor_t,
miopenDestroyFusionPlan);
using fused_operator_args = MIGRAPH_MANAGE_PTR(miopenOperatorArgs_t, miopenDestroyOperatorArgs);
using LRN_descriptor = MIGRAPH_MANAGE_PTR(miopenLRNDescriptor_t, miopenDestroyLRNDescriptor);
using lrn_descriptor = MIGRAPH_MANAGE_PTR(miopenLRNDescriptor_t, miopenDestroyLRNDescriptor);
template <class Result, class F, class... Ts>
Result make_obj(F f, Ts... xs)
......@@ -85,9 +85,9 @@ inline pooling_descriptor make_pooling(const migraph::op::pooling& op)
return p;
}
inline LRN_descriptor make_LRN(const migraph::op::LRN& op)
inline lrn_descriptor make_lrn(const migraph::op::lrn& op)
{
auto ldesc = make_obj<LRN_descriptor>(&miopenCreateLRNDescriptor);
auto ldesc = make_obj<lrn_descriptor>(&miopenCreateLRNDescriptor);
miopenSetLRNDescriptor(ldesc.get(), miopenLRNCrossChannel, op.size, op.alpha, op.beta, op.bias);
return ldesc;
}
......
......@@ -15,7 +15,7 @@
#include <migraph/gpu/context.hpp>
#include <migraph/gpu/convolution.hpp>
#include <migraph/gpu/contiguous.hpp>
#include <migraph/gpu/LRN.hpp>
#include <migraph/gpu/lrn.hpp>
#include <migraph/gpu/relu.hpp>
#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/gpu/softmax.hpp>
......@@ -64,9 +64,9 @@ struct miopen_apply
{
check_shape(s, apply_pooling(it));
}
else if(it->name() == "LRN")
else if(it->name() == "lrn")
{
check_shape(s, apply_LRN(it));
check_shape(s, apply_lrn(it));
}
else if(it->name() == "add")
{
......@@ -137,13 +137,13 @@ struct miopen_apply
ins, miopen_pooling{op, std::move(pd)}, ins->inputs().at(0), output);
}
instruction_ref apply_LRN(instruction_ref ins)
instruction_ref apply_lrn(instruction_ref ins)
{
auto&& op = any_cast<op::LRN>(ins->get_operator());
auto ldesc = make_LRN(op);
auto&& op = any_cast<op::lrn>(ins->get_operator());
auto ldesc = make_lrn(op);
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_LRN{std::move(ldesc)}, ins->inputs().at(0), output);
ins, miopen_lrn{std::move(ldesc)}, ins->inputs().at(0), output);
}
instruction_ref apply_relu(instruction_ref ins)
......
#include <migraph/gpu/LRN.hpp>
#include <migraph/gpu/lrn.hpp>
#include <migraph/operators.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/gpu/miopen.hpp>
......@@ -8,13 +8,13 @@ namespace migraph {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {
shape miopen_LRN::compute_shape(const std::vector<shape>& inputs) const
shape miopen_lrn::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_LRN::compute(context& ctx,
argument miopen_lrn::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
......
......@@ -579,12 +579,12 @@ TEST_CASE(leaky_relu_test)
EXPECT(migraph::verify_range(results_vector, gold));
}
TEST_CASE(LRN_test)
TEST_CASE(lrn_test)
{
migraph::program p;
migraph::shape s{migraph::shape::float_type, {1, 5, 1, 1}};
auto l = p.add_literal(migraph::literal{s, {-2.0f, 1.0f, 0.f, 1.0f, 2.0f}});
p.add_instruction(migraph::op::LRN{0.0001, 0.75, 1, 5}, l);
p.add_instruction(migraph::op::lrn{0.0001, 0.75, 1, 5}, l);
p.compile(migraph::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(5);
......
......@@ -450,13 +450,13 @@ struct test_leaky_relu
}
};
struct test_LRN
struct test_lrn
{
migraph::program create_program() const
{
migraph::program p;
auto x = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {1, 5, 2, 2}});
p.add_instruction(migraph::op::LRN{0.0001, 0.75, 1.0, 5}, x);
p.add_instruction(migraph::op::lrn{0.0001, 0.75, 1.0, 5}, x);
return p;
}
};
......@@ -840,7 +840,7 @@ struct test_conv_bn_relu_pooling2
int main()
{
verify_program<test_LRN>();
verify_program<test_lrn>();
verify_program<test_concat>();
verify_program<test_concat2>();
verify_program<test_concat_relu>();
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment