Commit e320f89f authored by Shucai Xiao, committed by mvermeulen

Reduce operators (#427)

* add reduce operators as required by onnxruntime

* clang format

* remove a test since it can cause overflow

* resolve cppcheck error

* clang format

* fix cppcheck error

* clang format
parent f60b5421
@@ -40,6 +40,15 @@ struct zero
    }
};

+struct one
+{
+    template <class T>
+    operator T() const
+    {
+        return T{1};
+    }
+};
+
template <class Derived>
struct reduce_op : op_name<Derived>
{
...
#ifndef MIGRAPHX_GUARD_OPERATORS_REDUCE_PROD_HPP
#define MIGRAPHX_GUARD_OPERATORS_REDUCE_PROD_HPP

#include <migraphx/op/reduce_op.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {

struct reduce_prod : reduce_op<reduce_prod>
{
    reduce_prod() {}
    reduce_prod(std::vector<int64_t> ax) : reduce_op(std::move(ax)) {}

    auto op() const
    {
        return [=](auto x, auto y) { return x * y; };
    }

    auto init() const { return one(); }
};

} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
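For context, `reduce_op` is a CRTP base: the derived operator supplies `op()` (the pairwise combine function) and `init()` (the fold's starting value), so `reduce_prod` amounts to a running product seeded with `one`. A minimal sketch of that fold on a plain vector, with illustrative names that are not the MIGraphX internals:

```cpp
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Hypothetical stand-in for the reduce_op fold: seed the accumulator with
// init and combine every element of the reduced axis with op.
template <class T, class BinaryOp>
T fold_axis(const std::vector<T>& elems, BinaryOp op, T init)
{
    return std::accumulate(elems.begin(), elems.end(), init, op);
}

int main()
{
    std::vector<float> elems{1, 2, 3, 1}; // inputs for one output position
    // reduce_prod: op multiplies, init is 1 (what the `one` struct converts to)
    std::cout << fold_axis(elems, std::multiplies<>{}, 1.0f) << "\n"; // prints 6
}
```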
@@ -51,10 +51,11 @@
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/quant_dot.hpp>
#include <migraphx/op/pow.hpp>
-#include <migraphx/op/reduce_sum.hpp>
+#include <migraphx/op/reduce_max.hpp>
#include <migraphx/op/reduce_mean.hpp>
#include <migraphx/op/reduce_min.hpp>
-#include <migraphx/op/reduce_max.hpp>
+#include <migraphx/op/reduce_prod.hpp>
+#include <migraphx/op/reduce_sum.hpp>
#include <migraphx/op/relu.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/rnn.hpp>
...
@@ -106,10 +106,17 @@ struct onnx_parser
        add_mem_op("GRU", &onnx_parser::parse_gru);
        add_mem_op("LSTM", &onnx_parser::parse_lstm);
        add_mem_op("Pad", &onnx_parser::parse_pad);
-       add_mem_op("ReduceSum", &onnx_parser::parse_reduce_oper<op::reduce_sum>);
+       add_mem_op("ReduceL1", &onnx_parser::parse_reduce_l1);
+       add_mem_op("ReduceL2", &onnx_parser::parse_reduce_l2);
+       add_mem_op("ReduceLogSum", &onnx_parser::parse_reduce_log_sum);
+       add_mem_op("ReduceLogSumExp", &onnx_parser::parse_reduce_log_sum_exp);
+       add_mem_op("ReduceMax", &onnx_parser::parse_reduce_oper<op::reduce_max>);
        add_mem_op("ReduceMean", &onnx_parser::parse_reduce_oper<op::reduce_mean>);
        add_mem_op("ReduceMin", &onnx_parser::parse_reduce_oper<op::reduce_min>);
+       add_mem_op("ReduceProd", &onnx_parser::parse_reduce_oper<op::reduce_prod>);
+       add_mem_op("ReduceSum", &onnx_parser::parse_reduce_oper<op::reduce_sum>);
+       add_mem_op("ReduceSumSquare", &onnx_parser::parse_reduce_sum_square);

        // init the activation function map
        init_actv_func();
@@ -1481,6 +1488,47 @@ struct onnx_parser
        }
    }

+   instruction_ref
+   parse_reduce_l1(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
+   {
+       auto abs_ins = prog.add_instruction(op::abs{}, args[0]);
+       return parse_reduce_oper<op::reduce_sum>({}, std::move(attributes), {abs_ins});
+   }
+
+   instruction_ref
+   parse_reduce_l2(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
+   {
+       auto square_ins = prog.add_instruction(op::mul{}, args[0], args[0]);
+       auto sum_ins = parse_reduce_oper<op::reduce_sum>({}, std::move(attributes), {square_ins});
+       return prog.add_instruction(op::sqrt{}, sum_ins);
+   }
+
+   instruction_ref parse_reduce_log_sum(const std::string&,
+                                        attribute_map attributes,
+                                        std::vector<instruction_ref> args)
+   {
+       auto sum_ins =
+           parse_reduce_oper<op::reduce_sum>({}, std::move(attributes), std::move(args));
+       return prog.add_instruction(op::log{}, sum_ins);
+   }
+
+   instruction_ref parse_reduce_log_sum_exp(const std::string&,
+                                            attribute_map attributes,
+                                            std::vector<instruction_ref> args)
+   {
+       auto exp_ins = prog.add_instruction(op::exp{}, args[0]);
+       auto sum_ins = parse_reduce_oper<op::reduce_sum>({}, std::move(attributes), {exp_ins});
+       return prog.add_instruction(op::log{}, sum_ins);
+   }
+
+   instruction_ref parse_reduce_sum_square(const std::string&,
+                                           attribute_map attributes,
+                                           std::vector<instruction_ref> args)
+   {
+       auto square_ins = prog.add_instruction(op::mul{}, args[0], args[0]);
+       return parse_reduce_oper<op::reduce_sum>({}, std::move(attributes), {square_ins});
+   }
+
    instruction_ref
    parse_cast(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
...
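None of these composite reductions is a new primitive: the parser lowers each one onto `reduce_sum` plus elementwise ops, using the identities ReduceL1(x) = sum(|x|), ReduceL2(x) = sqrt(sum(x^2)), ReduceLogSum(x) = log(sum(x)), ReduceLogSumExp(x) = log(sum(exp(x))), and ReduceSumSquare(x) = sum(x^2). A minimal sketch of the same decompositions on a flat vector (illustrative only, not the MIGraphX API):

```cpp
#include <cmath>
#include <iostream>
#include <numeric>
#include <vector>

float reduce_sum(const std::vector<float>& v)
{
    return std::accumulate(v.begin(), v.end(), 0.0f);
}

// Each composite reduction is elementwise preprocessing, a sum, and an
// optional postprocessing step, mirroring the parse_reduce_* lowerings.
float reduce_l1(std::vector<float> v)
{
    for(auto& x : v)
        x = std::fabs(x); // abs, then sum
    return reduce_sum(v);
}

float reduce_l2(std::vector<float> v)
{
    for(auto& x : v)
        x = x * x; // square, sum, then sqrt
    return std::sqrt(reduce_sum(v));
}

float reduce_log_sum_exp(std::vector<float> v)
{
    for(auto& x : v)
        x = std::exp(x); // exp, sum, then log
    return std::log(reduce_sum(v));
}

int main()
{
    std::vector<float> v{3, -4};
    std::cout << reduce_l1(v) << " "          // 7
              << reduce_l2(v) << " "          // 5
              << reduce_log_sum_exp(v) << "\n";
}
```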
@@ -47,6 +47,7 @@ add_library(migraphx_device
    device/reduce_mean.cpp
    device/reduce_min.cpp
    device/reduce_sum.cpp
+   device/reduce_prod.cpp
    device/relu.cpp
    device/round.cpp
    device/rsqrt.cpp
...
@@ -20,6 +20,15 @@ struct sum
    }
};

+struct product
+{
+    template <class T, class U>
+    MIGRAPHX_DEVICE_CONSTEXPR auto operator()(T x, U y) const
+    {
+        return x * y;
+    }
+};
+
struct id
{
    template <class T>
...
#include <migraphx/gpu/device/reduce_prod.hpp>
#include <migraphx/gpu/device/reduce.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void reduce_prod(hipStream_t stream, const argument& result, const argument& arg)
{
    reduce(stream, result, arg, product{}, 1, id{}, id{});
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
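The device entry point reuses the generic `reduce` kernel: `product{}` is the combine op, `1` is the initial value, and the two `id{}` arguments are the per-element input transform and the final output transform. A rough CPU analogue of that signature, under the assumption (not verified here) that the transforms are applied on the way in and on the way out:

```cpp
#include <iostream>
#include <vector>

// Hypothetical CPU analogue of the generic reduce: fold `in` with `op`
// starting from `init`, mapping each element with `read` first and the
// final accumulator with `write` at the end.
template <class T, class Op, class Read, class Write>
T reduce_cpu(const std::vector<T>& in, Op op, T init, Read read, Write write)
{
    T acc = init;
    for(const auto& x : in)
        acc = op(acc, read(x));
    return write(acc);
}

int main()
{
    auto id  = [](auto x) { return x; };
    auto mul = [](auto x, auto y) { return x * y; };
    std::vector<float> in{2, 3, 4};
    // reduce_prod: product combine, init 1, identity transforms
    std::cout << reduce_cpu(in, mul, 1.0f, id, id) << "\n"; // prints 24
}
```

This shape presumably lets a reduction like reduce_mean divide the summed value by the element count through a non-identity output transform in the same kernel.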
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_REDUCE_PROD_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_REDUCE_PROD_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void reduce_prod(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -32,6 +32,7 @@ struct reduce_op : oper<Derived>
    {
        std::vector<shape> in_shapes{inputs};
        in_shapes.pop_back();
+       check_shapes{in_shapes}.standard();
        return op.compute_shape(in_shapes);
    }
...
#ifndef MIGRAPHX_GUARD_RTGLIB_REDUCE_PROD_HPP
#define MIGRAPHX_GUARD_RTGLIB_REDUCE_PROD_HPP

#include <migraphx/op/reduce_prod.hpp>
#include <migraphx/gpu/reduce_op.hpp>
#include <migraphx/gpu/device/reduce_prod.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct context;

struct hip_reduce_prod : reduce_op<hip_reduce_prod, op::reduce_prod, device::reduce_prod>
{
    hip_reduce_prod() {}
    hip_reduce_prod(const op::reduce_prod& op_ref) : reduce_op(op_ref) {}
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
...@@ -53,15 +53,16 @@ ...@@ -53,15 +53,16 @@
#include <migraphx/gpu/lrn.hpp> #include <migraphx/gpu/lrn.hpp>
#include <migraphx/gpu/convert.hpp> #include <migraphx/gpu/convert.hpp>
#include <migraphx/gpu/clip.hpp> #include <migraphx/gpu/clip.hpp>
#include <migraphx/gpu/reduce_sum.hpp>
#include <migraphx/gpu/round.hpp> #include <migraphx/gpu/round.hpp>
#include <migraphx/gpu/ceil.hpp> #include <migraphx/gpu/ceil.hpp>
#include <migraphx/gpu/floor.hpp> #include <migraphx/gpu/floor.hpp>
#include <migraphx/gpu/rsqrt.hpp> #include <migraphx/gpu/rsqrt.hpp>
#include <migraphx/gpu/sqrt.hpp> #include <migraphx/gpu/sqrt.hpp>
#include <migraphx/gpu/reduce_max.hpp>
#include <migraphx/gpu/reduce_mean.hpp> #include <migraphx/gpu/reduce_mean.hpp>
#include <migraphx/gpu/reduce_min.hpp> #include <migraphx/gpu/reduce_min.hpp>
#include <migraphx/gpu/reduce_max.hpp> #include <migraphx/gpu/reduce_prod.hpp>
#include <migraphx/gpu/reduce_sum.hpp>
#include <migraphx/gpu/pow.hpp> #include <migraphx/gpu/pow.hpp>
#include <migraphx/gpu/sqdiff.hpp> #include <migraphx/gpu/sqdiff.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp> #include <migraphx/gpu/int8_conv_pack.hpp>
@@ -144,10 +145,11 @@ struct miopen_apply
        add_extend_op<hip_pad, op::pad>("pad");
        add_extend_op<hip_convert, op::convert>("convert");
        add_extend_op<hip_clip, op::clip>("clip");
-       add_extend_op<hip_reduce_sum, op::reduce_sum>("reduce_sum");
+       add_extend_op<hip_reduce_max, op::reduce_max>("reduce_max");
        add_extend_op<hip_reduce_mean, op::reduce_mean>("reduce_mean");
        add_extend_op<hip_reduce_min, op::reduce_min>("reduce_min");
-       add_extend_op<hip_reduce_max, op::reduce_max>("reduce_max");
+       add_extend_op<hip_reduce_prod, op::reduce_prod>("reduce_prod");
+       add_extend_op<hip_reduce_sum, op::reduce_sum>("reduce_sum");

        add_gemm_op<op::dot>("dot");
        add_gemm_op<op::quant_dot>("quant_dot");
...
@@ -1975,6 +1975,21 @@ TEST_CASE(clip_test)
    EXPECT(migraphx::verify_range(results_vector, gold));
}

+TEST_CASE(reduce_prod_axis0)
+{
+    migraphx::program p;
+    migraphx::shape s{migraphx::shape::float_type, {4, 2, 2}};
+    auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 3, 2, 3}};
+    auto l0    = p.add_literal(input);
+    p.add_instruction(migraphx::op::reduce_prod{{0}}, l0);
+    p.compile(migraphx::cpu::target{});
+    auto result = p.eval({});
+    std::vector<float> results_vector;
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{6, 18, 12, 18};
+    EXPECT(results_vector == gold);
+}
+
TEST_CASE(reduce_sum_axis0)
{
    migraphx::program p;
...
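The gold values follow directly from the literal: reducing axis 0 of the {4, 2, 2} input multiplies corresponding elements of the four 2x2 slices, i.e. 1·2·3·1 = 6, 2·3·1·3 = 18, 3·1·2·2 = 12, and 1·2·3·3 = 18. A quick standalone check of that arithmetic:

```cpp
#include <iostream>
#include <vector>

int main()
{
    // Same 16 values as the test literal, row-major in shape {4, 2, 2}.
    std::vector<float> data{1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 3, 2, 3};
    std::vector<float> gold(4, 1.0f); // one accumulator per (j, k) position
    for(std::size_t i = 0; i < 4; ++i)      // the reduced axis
        for(std::size_t jk = 0; jk < 4; ++jk)
            gold[jk] *= data[i * 4 + jk];
    for(auto v : gold)
        std::cout << v << " "; // prints 6 18 12 18
    std::cout << "\n";
}
```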
@@ -4095,10 +4095,11 @@ struct test_reduce_op_large : verify_program<test_reduce_op_large<Op, Axis, T>>
    };
};

-template struct test_reduce_op_large<migraphx::op::reduce_sum, 1, migraphx::shape::float_type>;
-template struct test_reduce_op_large<migraphx::op::reduce_mean, 1, migraphx::shape::float_type>;
template struct test_reduce_op_large<migraphx::op::reduce_max, 1, migraphx::shape::float_type>;
+template struct test_reduce_op_large<migraphx::op::reduce_mean, 1, migraphx::shape::float_type>;
template struct test_reduce_op_large<migraphx::op::reduce_min, 1, migraphx::shape::float_type>;
+template struct test_reduce_op_large<migraphx::op::reduce_prod, 2, migraphx::shape::float_type>;
+template struct test_reduce_op_large<migraphx::op::reduce_sum, 1, migraphx::shape::float_type>;

template <class Op, int Axis, migraphx::shape::type_t T>
struct test_reduce_op_small : verify_program<test_reduce_op_small<Op, Axis, T>>
...
@@ -4121,6 +4122,7 @@ template struct test_reduce_op_small<migraphx::op::reduce_sum, 2, migraphx::shap
template struct test_reduce_op_small<migraphx::op::reduce_mean, 2, migraphx::shape::half_type>;
template struct test_reduce_op_small<migraphx::op::reduce_max, 2, migraphx::shape::half_type>;
template struct test_reduce_op_small<migraphx::op::reduce_min, 2, migraphx::shape::half_type>;
+template struct test_reduce_op_small<migraphx::op::reduce_prod, -2, migraphx::shape::half_type>;

struct test_rsqrt : verify_program<test_rsqrt>
{
...
@@ -1152,10 +1152,70 @@ def pow_test():
    return ([node], [arg0, arg1], [arg_out])

+@onnx_test
+def reducel1_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
+    axes = [-2]
+
+    node = onnx.helper.make_node('ReduceL1',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=0)
+
+    return ([node], [x], [y])
+
+@onnx_test
+def reducel2_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
+    axes = [-1]
+
+    node = onnx.helper.make_node('ReduceL2',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=0)
+
+    return ([node], [x], [y])
+
+@onnx_test
+def reduce_log_sum_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 1, 5, 6])
+    axes = [-3]
+
+    node = onnx.helper.make_node('ReduceLogSum',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=1)
+
+    return ([node], [x], [y])
+
+@onnx_test
+def reduce_log_sum_exp_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [4, 5, 6])
+    axes = [-4]
+
+    node = onnx.helper.make_node('ReduceLogSumExp',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=1)
+
+    return ([node], [x], [y])
+
@onnx_test
def reducemax_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
    axes = [2]

    node = onnx.helper.make_node('ReduceMax',
@@ -1213,12 +1273,12 @@ def reducemin_test():
@onnx_test
-def reducesum_test():
+def reduceprod_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
    axes = [2]

-    node = onnx.helper.make_node('ReduceSum',
+    node = onnx.helper.make_node('ReduceProd',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
@@ -1228,10 +1288,10 @@ def reducesum_test():
@onnx_test
-def reducesum_multiaxis_test():
+def reducesum_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
-    axes = [2, 3]
+    axes = [2]
    node = onnx.helper.make_node('ReduceSum',
                                 inputs=['x'],
@@ -1257,6 +1317,36 @@ def reducesum_keepdims_test():
    return ([node], [x], [y])

+@onnx_test
+def reducesum_multiaxis_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
+    axes = [2, 3]
+
+    node = onnx.helper.make_node('ReduceSum',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=0)
+
+    return ([node], [x], [y])
+
+@onnx_test
+def reducesum_square_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
+    axes = [-2]
+
+    node = onnx.helper.make_node('ReduceSumSquare',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=0)
+
+    return ([node], [x], [y])
+
@onnx_test
def reshape_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [4, 2, 3])
...
@@ -883,6 +883,54 @@ TEST_CASE(pow_test)
    EXPECT(p == prog);
}

+TEST_CASE(reducel1_test)
+{
+    migraphx::program p;
+    auto l0     = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
+    auto abs_l0 = p.add_instruction(migraphx::op::abs{}, l0);
+    auto sum_l0 = p.add_instruction(migraphx::op::reduce_sum{{-2}}, abs_l0);
+    p.add_instruction(migraphx::op::squeeze{{-2}}, sum_l0);
+    auto prog = optimize_onnx("reducel1_test.onnx");
+
+    EXPECT(p == prog);
+}
+
+TEST_CASE(reducel2_test)
+{
+    migraphx::program p;
+    auto l0        = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
+    auto square_l0 = p.add_instruction(migraphx::op::mul{}, l0, l0);
+    auto sum_l0    = p.add_instruction(migraphx::op::reduce_sum{{-1}}, square_l0);
+    auto squ_l0    = p.add_instruction(migraphx::op::squeeze{{-1}}, sum_l0);
+    p.add_instruction(migraphx::op::sqrt{}, squ_l0);
+    auto prog = optimize_onnx("reducel2_test.onnx");
+
+    EXPECT(p == prog);
+}
+
+TEST_CASE(reduce_log_sum_test)
+{
+    migraphx::program p;
+    auto l0     = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
+    auto sum_l0 = p.add_instruction(migraphx::op::reduce_sum{{-3}}, l0);
+    p.add_instruction(migraphx::op::log{}, sum_l0);
+    auto prog = optimize_onnx("reduce_log_sum_test.onnx");
+
+    EXPECT(p == prog);
+}
+
+TEST_CASE(reduce_log_sum_exp_test)
+{
+    migraphx::program p;
+    auto l0     = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
+    auto exp_l0 = p.add_instruction(migraphx::op::exp{}, l0);
+    auto sum_l0 = p.add_instruction(migraphx::op::reduce_sum{{-4}}, exp_l0);
+    p.add_instruction(migraphx::op::log{}, sum_l0);
+    auto prog = optimize_onnx("reduce_log_sum_exp_test.onnx");
+
+    EXPECT(p == prog);
+}
+
TEST_CASE(reducemax_test)
{
    migraphx::program p;
@@ -925,6 +973,16 @@ TEST_CASE(reducemin_test)
    EXPECT(p == prog);
}

+TEST_CASE(reduceprod_test)
+{
+    migraphx::program p;
+    auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
+    p.add_instruction(migraphx::op::reduce_prod{{2}}, l0);
+    auto prog = optimize_onnx("reduceprod_test.onnx");
+
+    EXPECT(p == prog);
+}
+
TEST_CASE(reducesum_test)
{
    migraphx::program p;
@@ -957,6 +1015,18 @@ TEST_CASE(reducesum_keepdims_test)
    EXPECT(p == prog);
}

+TEST_CASE(reducesum_square_test)
+{
+    migraphx::program p;
+    auto l0     = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
+    auto squ_l0 = p.add_instruction(migraphx::op::mul{}, l0, l0);
+    auto sum_l0 = p.add_instruction(migraphx::op::reduce_sum{{-2}}, squ_l0);
+    p.add_instruction(migraphx::op::squeeze{{-2}}, sum_l0);
+    auto prog = optimize_onnx("reducesum_square_test.onnx");
+
+    EXPECT(p == prog);
+}
+
TEST_CASE(reshape_test)
{
    migraphx::program p;
...
(Binary ONNX test models added: reduce_log_sum_exp_test.onnx, reduce_log_sum_test.onnx, and reduceprod_test.onnx; the serialized protobuf content is not rendered here.)