Commit 3962c2ad authored by Shucai Xiao, committed by mvermeulen

Reduce_min/max operators (#363)

* first version of refactoring reduce operators.

* clang format

* refactor the gpu implementation of the reduce_mean operator

* clang format

* refactor gpu implementation of the reduce_sum operator

* fix cpp check error

* fix cppcheck error

* fix cppcheck error

* fix review comments

* clang format

* fix a Jenkins error

* fixed review comments

* clang format

* fix review comments

* clang format

* fix review comments

* clang format

* add implementation of reduce_min and reduce_max

* clang format

* add unit test for reduce_min/max operator

* clang format

* add more unit tests

* clang format

* fix review comments
parent c8192195
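
For orientation before the per-file diffs, here is a minimal usage sketch of the new operators on the CPU target, mirroring the unit tests added further down; the exact include list and the printing loop are assumptions for illustration, not part of this commit.

// Illustrative usage of the new reduce_max operator; mirrors the style of the CPU unit tests below.
#include <iostream>
#include <migraphx/program.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/cpu/target.hpp>

int main()
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {3, 2, 2}};
    auto l0 = p.add_literal(migraphx::literal{s, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}});
    p.add_instruction(migraphx::op::reduce_max{{1}}, l0); // reduce over axis 1 -> shape {3, 1, 2}
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    result.visit([](auto output) {
        for(auto v : output)
            std::cout << v << ' '; // 3 4 7 8 11 12
    });
    std::cout << '\n';
}
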
#ifndef MIGRAPHX_GUARD_OPERATORS_REDUCE_MAX_HPP
#define MIGRAPHX_GUARD_OPERATORS_REDUCE_MAX_HPP
#include <migraphx/op/reduce_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
struct reduce_max : reduce_op<reduce_max>
{
reduce_max() {}
reduce_max(std::vector<int64_t> ax) : reduce_op(std::move(ax)) {}
auto op() const
{
return [=](auto x, auto y) { return x > y ? x : y; };
}
auto init() const { return lowest(); }
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
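
The op()/init() pair above is all a concrete reducer has to supply: op() is the binary combinator and init() its identity element, and the shared reduce_op base conceptually folds op() over the elements along the requested axes starting from init(). A standalone sketch of that contract follows; std::accumulate stands in for the library's axis-aware reduction loop and is not the MIGraphX implementation.

// Standalone illustration of the op()/init() contract used by reduce_max above.
#include <iostream>
#include <limits>
#include <numeric>
#include <vector>

int main()
{
    std::vector<float> data{1, 5, 3, -2, 7, 0};

    auto max_op   = [](auto x, auto y) { return x > y ? x : y; }; // op()
    auto max_init = std::numeric_limits<float>::lowest();         // init(), i.e. lowest()

    // Folding op() over the reduced elements, seeded with init(), yields the reduction.
    float reduced = std::accumulate(data.begin(), data.end(), max_init, max_op);
    std::cout << reduced << '\n'; // 7
}
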
#ifndef MIGRAPHX_GUARD_OPERATORS_MEAN_HPP
#define MIGRAPHX_GUARD_OPERATORS_MEAN_HPP
#ifndef MIGRAPHX_GUARD_OPERATORS_REDUCE_MEAN_HPP
#define MIGRAPHX_GUARD_OPERATORS_REDUCE_MEAN_HPP
#include <migraphx/op/reduce_op.hpp>
......
#ifndef MIGRAPHX_GUARD_OPERATORS_REDUCE_MIN_HPP
#define MIGRAPHX_GUARD_OPERATORS_REDUCE_MIN_HPP
#include <migraphx/op/reduce_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
struct reduce_min : reduce_op<reduce_min>
{
reduce_min() {}
reduce_min(std::vector<int64_t> ax) : reduce_op(std::move(ax)) {}
auto op() const
{
return [=](auto x, auto y) { return x < y ? x : y; };
}
auto init() const { return highest(); }
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_OPERATORS_SUM_HPP
#define MIGRAPHX_GUARD_OPERATORS_SUM_HPP
#ifndef MIGRAPHX_GUARD_OPERATORS_REDUCE_SUM_HPP
#define MIGRAPHX_GUARD_OPERATORS_REDUCE_SUM_HPP
#include <migraphx/op/reduce_op.hpp>
......
......@@ -51,6 +51,8 @@
#include <migraphx/op/pow.hpp>
#include <migraphx/op/reduce_sum.hpp>
#include <migraphx/op/reduce_mean.hpp>
#include <migraphx/op/reduce_min.hpp>
#include <migraphx/op/reduce_max.hpp>
#include <migraphx/op/relu.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/rnn.hpp>
......
......@@ -105,6 +105,8 @@ struct onnx_parser
add_mem_op("Pad", &onnx_parser::parse_pad);
add_mem_op("ReduceSum", &onnx_parser::parse_reduce_oper<op::reduce_sum>);
add_mem_op("ReduceMean", &onnx_parser::parse_reduce_oper<op::reduce_mean>);
add_mem_op("ReduceMin", &onnx_parser::parse_reduce_oper<op::reduce_min>);
add_mem_op("ReduceMax", &onnx_parser::parse_reduce_oper<op::reduce_max>);
// init the activation function map
init_actv_func();
......
......@@ -49,6 +49,8 @@ add_library(migraphx_device
device/round.cpp
device/sqrt.cpp
device/reduce_mean.cpp
device/reduce_min.cpp
device/reduce_max.cpp
device/pow.cpp
device/sqdiff.cpp
device/sign.cpp
......
#include <migraphx/gpu/device/reduce_max.hpp>
#include <migraphx/gpu/device/reduce.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void reduce_max(hipStream_t stream, const argument& result, const argument& arg)
{
reduce(stream, result, arg, max{}, lowest{}, id{}, id{});
}
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/device/reduce_min.hpp>
#include <migraphx/gpu/device/reduce.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void reduce_min(hipStream_t stream, const argument& result, const argument& arg)
{
reduce(stream, result, arg, min{}, highest{}, id{}, id{});
}
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
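
In both device files the work is delegated to the shared reduce() kernel; the four functors describe, in order, the binary combinator (max{}/min{}), its identity (lowest{}/highest{}), a per-element input transform, and a final output transform (both id{} here, whereas a mean reduction would divide by the element count in the output step). Below is a serial sketch of that calling convention; generic_reduce is an illustrative stand-in for the GPU kernel, not its actual signature.

// Serial analogue of reduce(stream, result, arg, op, init, read, write); illustrative only.
#include <iostream>
#include <limits>
#include <vector>

template <class T, class Op, class Read, class Write>
auto generic_reduce(const std::vector<T>& xs, Op op, T init, Read read, Write write)
{
    T acc = init;               // identity element, e.g. highest() for a min-reduction
    for(const auto& x : xs)
        acc = op(acc, read(x)); // read: per-element input transform (id{} for min/max)
    return write(acc);          // write: final output transform (id{} for min/max)
}

int main()
{
    std::vector<float> xs{4, -1, 9, 2};
    auto id = [](auto x) { return x; };
    auto mn = [](auto a, auto b) { return a < b ? a : b; };
    std::cout << generic_reduce(xs, mn, std::numeric_limits<float>::max(), id, id) << '\n'; // -1
}
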
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_REDUCE_MAX_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_REDUCE_MAX_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void reduce_max(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_REDUCE_MIN_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_REDUCE_MIN_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void reduce_min(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_REDUCE_MAX_HPP
#define MIGRAPHX_GUARD_RTGLIB_REDUCE_MAX_HPP
#include <migraphx/op/reduce_max.hpp>
#include <migraphx/gpu/reduce_op.hpp>
#include <migraphx/gpu/device/reduce_max.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct context;
struct hip_reduce_max : reduce_op<hip_reduce_max, op::reduce_max, device::reduce_max>
{
hip_reduce_max() {}
hip_reduce_max(const op::reduce_max& op_ref) : reduce_op(op_ref) {}
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_REDUCE_MIN_HPP
#define MIGRAPHX_GUARD_RTGLIB_REDUCE_MIN_HPP
#include <migraphx/op/reduce_min.hpp>
#include <migraphx/gpu/reduce_op.hpp>
#include <migraphx/gpu/device/reduce_min.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct context;
struct hip_reduce_min : reduce_op<hip_reduce_min, op::reduce_min, device::reduce_min>
{
hip_reduce_min() {}
hip_reduce_min(const op::reduce_min& op_ref) : reduce_op(op_ref) {}
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
......@@ -58,6 +58,8 @@
#include <migraphx/gpu/rsqrt.hpp>
#include <migraphx/gpu/sqrt.hpp>
#include <migraphx/gpu/reduce_mean.hpp>
#include <migraphx/gpu/reduce_min.hpp>
#include <migraphx/gpu/reduce_max.hpp>
#include <migraphx/gpu/pow.hpp>
#include <migraphx/gpu/sqdiff.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp>
......@@ -131,6 +133,8 @@ struct miopen_apply
add_extend_op<hip_clip, op::clip>("clip");
add_extend_op<hip_reduce_sum, op::reduce_sum>("reduce_sum");
add_extend_op<hip_reduce_mean, op::reduce_mean>("reduce_mean");
add_extend_op<hip_reduce_min, op::reduce_min>("reduce_min");
add_extend_op<hip_reduce_max, op::reduce_max>("reduce_max");
add_gemm_op<op::dot>("dot");
add_gemm_op<op::quant_dot>("quant_dot");
......
......@@ -2014,6 +2014,96 @@ TEST_CASE(reduce_mean_int)
EXPECT(results_vector == gold);
}
TEST_CASE(reduce_min_axis1)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3, 2, 2}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}};
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_min{{1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 2, 5, 6, 9, 10};
EXPECT(results_vector == gold);
}
TEST_CASE(reduce_min_axis02)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3, 2, 2}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}};
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_min{{0, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 3};
EXPECT(results_vector == gold);
}
TEST_CASE(reduce_min_axis12)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3, 2, 2}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}};
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_min{{1, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 5, 9};
EXPECT(results_vector == gold);
}
TEST_CASE(reduce_max_axis0)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3, 2, 2}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}};
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_max{{0}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{9, 10, 11, 12};
EXPECT(results_vector == gold);
}
TEST_CASE(reduce_max_axis01)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3, 2, 2}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}};
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_max{{0, 1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{11, 12};
EXPECT(results_vector == gold);
}
TEST_CASE(reduce_max_axis02)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3, 2, 2}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}};
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_max{{0, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{10, 12};
EXPECT(results_vector == gold);
}
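
As a quick cross-check of the gold values, reducing the row-major {3, 2, 2} input over axes {0, 2} leaves only axis 1, so each output element is the maximum over i and k for a fixed j; the short verification below recomputes {10, 12} and is not part of the test suite.

// Recomputes the reduce_max_axis02 gold values {10, 12}; illustrative check only.
#include <algorithm>
#include <iostream>

int main()
{
    // Shape {3, 2, 2}, row-major: element (i, j, k) lives at data[i * 4 + j * 2 + k].
    float data[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
    for(int j = 0; j < 2; ++j)
    {
        float m = data[j * 2]; // element (0, j, 0)
        for(int i = 0; i < 3; ++i)
            for(int k = 0; k < 2; ++k)
                m = std::max(m, data[i * 4 + j * 2 + k]);
        std::cout << m << ' '; // prints: 10 12
    }
    std::cout << '\n';
}
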
TEST_CASE(sqdiff_test)
{
migraphx::program p;
......
......@@ -3820,41 +3820,45 @@ struct test_fp32_fp16_sub : verify_program<test_fp32_fp16_sub>
};
};
struct test_reduce_sum : verify_program<test_reduce_sum>
template <class Op, int Axis, migraphx::shape::type_t T>
struct test_reduce_op_large : verify_program<test_reduce_op_large<Op, Axis, T>>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3, 1026, 4, 3}};
migraphx::shape s{T, {3, 1026, 4, 3}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::reduce_sum{{1}}, x);
p.add_instruction(Op{{Axis}}, x);
return p;
};
};
struct test_reduce_sum_int : verify_program<test_reduce_sum_int>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::int32_type, {3, 4, 8, 8}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::reduce_sum{{1}}, x);
return p;
};
};
template struct test_reduce_op_large<migraphx::op::reduce_sum, 1, migraphx::shape::float_type>;
template struct test_reduce_op_large<migraphx::op::reduce_mean, 1, migraphx::shape::float_type>;
template struct test_reduce_op_large<migraphx::op::reduce_max, 1, migraphx::shape::float_type>;
template struct test_reduce_op_large<migraphx::op::reduce_min, 1, migraphx::shape::float_type>;
struct test_reduce_sum_half : verify_program<test_reduce_sum_half>
template <class Op, int Axis, migraphx::shape::type_t T>
struct test_reduce_op_small : verify_program<test_reduce_op_small<Op, Axis, T>>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::half_type, {3, 4, 8, 8}};
migraphx::shape s{T, {3, 4, 8, 8}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::reduce_sum{{1}}, x);
p.add_instruction(Op{{Axis}}, x);
return p;
};
};
template struct test_reduce_op_small<migraphx::op::reduce_sum, 2, migraphx::shape::int32_type>;
template struct test_reduce_op_small<migraphx::op::reduce_mean, 2, migraphx::shape::int32_type>;
template struct test_reduce_op_small<migraphx::op::reduce_max, 2, migraphx::shape::int32_type>;
template struct test_reduce_op_small<migraphx::op::reduce_min, 2, migraphx::shape::int32_type>;
template struct test_reduce_op_small<migraphx::op::reduce_sum, 2, migraphx::shape::half_type>;
template struct test_reduce_op_small<migraphx::op::reduce_mean, 2, migraphx::shape::half_type>;
template struct test_reduce_op_small<migraphx::op::reduce_max, 2, migraphx::shape::half_type>;
template struct test_reduce_op_small<migraphx::op::reduce_min, 2, migraphx::shape::half_type>;
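
With the per-operator structs folded into these two templates, adding coverage for another operator, axis, or dtype is a one-line explicit instantiation; the line below is an illustrative example, not part of this commit.

// Hypothetical extra coverage: reduce_max over axis 1 of a small float tensor.
template struct test_reduce_op_small<migraphx::op::reduce_max, 1, migraphx::shape::float_type>;
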
struct test_rsqrt : verify_program<test_rsqrt>
{
......@@ -3869,54 +3873,6 @@ struct test_rsqrt : verify_program<test_rsqrt>
};
};
struct test_reduce_mean : verify_program<test_reduce_mean>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3, 9, 4, 3}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::reduce_mean{{1}}, x);
return p;
};
};
struct test_reduce_mean2 : verify_program<test_reduce_mean2>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {1, 128, 768}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::reduce_mean{{2}}, x);
return p;
};
};
struct test_reduce_mean_int : verify_program<test_reduce_mean_int>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::int32_type, {3, 1024, 8, 8}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::reduce_mean{{1}}, x);
return p;
};
};
struct test_reduce_mean_half : verify_program<test_reduce_mean_half>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::half_type, {3, 1024, 8, 8}};
auto x = p.add_parameter("x", s);
p.add_instruction(migraphx::op::reduce_mean{{2}}, x);
return p;
};
};
struct test_round : verify_program<test_round>
{
migraphx::program create_program() const
......
......@@ -1029,6 +1029,20 @@ def pow_test():
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def reducemax_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
axes = [2]
node = onnx.helper.make_node('ReduceMax',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
@onnx_test
def reducemean_test():
......@@ -1044,7 +1058,6 @@ def reducemean_test():
return ([node], [x], [y])
@onnx_test
def reducemean_keepdims_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
......@@ -1059,6 +1072,19 @@ def reducemean_keepdims_test():
return ([node], [x], [y])
@onnx_test
def reducemin_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 1, 5, 1])
axes = [1, 3]
node = onnx.helper.make_node('ReduceMin',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=1)
return ([node], [x], [y])
@onnx_test
def reducesum_test():
......@@ -1070,7 +1096,7 @@ def reducesum_test():
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
keepdims=1)
return ([node], [x], [y])
......
......@@ -757,6 +757,16 @@ TEST_CASE(pow_test)
EXPECT(p == prog);
}
TEST_CASE(reducemax_test)
{
migraphx::program p;
auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l1 = p.add_instruction(migraphx::op::reduce_max{{2}}, l0);
p.add_instruction(migraphx::op::squeeze{{2}}, l1);
auto prog = migraphx::parse_onnx("reducemax_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reducemean_test)
{
migraphx::program p;
......@@ -778,6 +788,17 @@ TEST_CASE(reducemean_keepdims_test)
EXPECT(p == prog);
}
TEST_CASE(reducemin_test)
{
migraphx::program p;
auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
p.add_instruction(migraphx::op::reduce_min{{1, 3}}, l0);
auto prog = migraphx::parse_onnx("reducemin_test.onnx");
EXPECT(p == prog);
}
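
The contrast between the two tests above comes down to the ONNX keepdims attribute: the reduce_* ops here keep the reduced axes as size-1 dimensions, so the parser appears to append a squeeze only when the model requests keepdims=0 (reducemax) and emits a bare reduce otherwise (reducemin, keepdims=1). A rough model of that decision follows; parse_reduce_attrs and the struct are illustrative names, not the actual onnx_parser code.

// Illustrative model of how keepdims maps onto reduce + optional squeeze.
#include <cstdint>
#include <utility>
#include <vector>

struct parsed_reduce
{
    std::vector<std::int64_t> axes;
    bool squeeze_after; // true when keepdims == 0: reduced axes must be removed afterwards
};

parsed_reduce parse_reduce_attrs(std::vector<std::int64_t> axes, int keepdims = 1)
{
    // keepdims defaults to 1 in ONNX; the reduce op itself keeps size-1 axes,
    // so only keepdims == 0 requires a trailing squeeze over the same axes.
    return {std::move(axes), keepdims == 0};
}

int main()
{
    auto max_plan = parse_reduce_attrs({2}, 0);    // reducemax_test: squeeze_after == true
    auto min_plan = parse_reduce_attrs({1, 3}, 1); // reducemin_test: squeeze_after == false
    return (max_plan.squeeze_after && !min_plan.squeeze_after) ? 0 : 1;
}
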
TEST_CASE(reducesum_test)
{
migraphx::program p;
......
[Binary file: reducemax_test.onnx — serialized ONNX protobuf for the test_reducemax graph, containing a single ReduceMax node (input x, output y, attributes axes and keepdims). Not human-readable in this view.]