"archive-del/sdk/vscode:/vscode.git/clone" did not exist on "e9f832df3603397a4a672d3c54fa8413be9ba0d9"
Unverified commit 48ffbfa5 authored by turneram, committed by GitHub

Added greater and less operators (#660)



* Added greater and less operators

* Fixed ops_test.cpp

* Set commutative to false for less, greater

* Refactored parse_equal/less/greater into parse_compare_op

* Removed unnecessary function attributes() from greater.hpp/less.hpp

* Added op_name arguments

* Removed local settings

* Formatting

* Missing comma

* Formatting

* Formatting

* Formatting

* Formatting

* Formatting

* Missing space
Co-authored-by: Paul Fultz II <pfultz2@yahoo.com>
parent 1d98fbb4
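
For context, both new operators follow the pattern of the existing equal op: an elementwise comparison whose result is usually converted to bool_type afterwards. A minimal usage sketch, mirroring the CPU tests added further down in this diff (the header paths and cpu target include are assumed from the surrounding code, not introduced by this commit):

// Sketch only: header paths assumed from the rest of this diff.
#include <migraphx/program.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/op/greater.hpp>
#include <migraphx/op/convert.hpp>
#include <migraphx/cpu/target.hpp>
#include <vector>

int main()
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {3}};
    auto x = p.add_literal(migraphx::literal{s, {1.0, 2.0, 3.0}});
    auto y = p.add_literal(migraphx::literal{s, {2.0, 2.0, 2.0}});
    // Elementwise x > y; the raw result keeps the input type, so convert to bool
    auto gt = p.add_instruction(migraphx::op::greater{}, x, y);
    auto r  = p.add_instruction(migraphx::op::convert{migraphx::shape::bool_type}, gt);
    p.add_return({r});
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({}).back();
    std::vector<bool> out;
    result.visit([&](auto v) { out.assign(v.begin(), v.end()); }); // {false, false, true}
    return out[2] ? 0 : 1;
}
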
@@ -81,10 +81,12 @@ register_migraphx_ops(
flatten
floor
gather
+greater
gru
identity
im2col
leaky_relu
+less
load
log
logsoftmax
...
#ifndef MIGRAPHX_GUARD_OPERATORS_GREATER_HPP
#define MIGRAPHX_GUARD_OPERATORS_GREATER_HPP
#include <migraphx/op/binary.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/config.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
struct greater : binary<greater>
{
auto apply() const
{
return [](auto x, auto y) { return x > y; };
}
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_OPERATORS_LESS_HPP
#define MIGRAPHX_GUARD_OPERATORS_LESS_HPP
#include <migraphx/op/binary.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/config.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
struct less : binary<less>
{
auto apply() const
{
return [](auto x, auto y) { return x < y; };
}
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -35,10 +35,12 @@
#include <migraphx/op/flatten.hpp>
#include <migraphx/op/floor.hpp>
#include <migraphx/op/gather.hpp>
+#include <migraphx/op/greater.hpp>
#include <migraphx/op/gru.hpp>
#include <migraphx/op/identity.hpp>
#include <migraphx/op/im2col.hpp>
#include <migraphx/op/leaky_relu.hpp>
+#include <migraphx/op/less.hpp>
#include <migraphx/op/load.hpp>
#include <migraphx/op/log.hpp>
#include <migraphx/op/logsoftmax.hpp>
...
@@ -135,16 +135,18 @@ struct onnx_parser
add_mem_op("ConvTranspose", &onnx_parser::parse_conv_transpose);
add_mem_op("Dropout", &onnx_parser::parse_dropout);
add_mem_op("Elu", &onnx_parser::parse_elu);
-add_mem_op("Equal", &onnx_parser::parse_equal);
+add_mem_op("Equal", "equal", &onnx_parser::parse_compare_op);
add_mem_op("Expand", &onnx_parser::parse_expand);
add_mem_op("GatherElements", &onnx_parser::parse_gather_elements);
add_mem_op("Gemm", &onnx_parser::parse_gemm);
add_mem_op("GlobalAveragePool", &onnx_parser::parse_pooling);
add_mem_op("GlobalMaxPool", &onnx_parser::parse_pooling);
+add_mem_op("Greater", "greater", &onnx_parser::parse_compare_op);
add_mem_op("GRU", &onnx_parser::parse_gru);
add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
add_mem_op("InstanceNormalization", &onnx_parser::parse_instancenorm);
add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
+add_mem_op("Less", "less", &onnx_parser::parse_compare_op);
add_mem_op("LRN", &onnx_parser::parse_lrn);
add_mem_op("LSTM", &onnx_parser::parse_lstm);
add_mem_op("MatMul", "dot", &onnx_parser::parse_matmul);

@@ -2476,10 +2478,12 @@ struct onnx_parser
return prog.add_literal(literal(out_s, out_data));
}

-instruction_ref
-parse_equal(const std::string&, const node_info&, std::vector<instruction_ref> args)
+instruction_ref parse_compare_op(const std::string&,
+                                 const std::string& op_name,
+                                 const node_info&,
+                                 std::vector<instruction_ref> args)
{
-auto l = add_broadcastable_binary_op(args[0], args[1], "equal");
+auto l = add_broadcastable_binary_op(args[0], args[1], op_name);
if(l->get_shape().type() != shape::bool_type)
{
l = prog.add_instruction(make_op("convert", {{"target_type", shape::bool_type}}), l);
...
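
A consequence of routing Equal, Greater, and Less through this shared parse_compare_op is that the parsed program always exposes a bool_type output, with a convert inserted whenever the broadcast comparison returns the input type. A hedged sketch of checking that, assuming the usual migraphx::parse_onnx entry point, a program::get_output_shapes() accessor, and a generated greater_test.onnx file (the file name and accessor are assumptions, not shown in this diff):

#include <migraphx/onnx.hpp>
#include <migraphx/program.hpp>
#include <migraphx/shape.hpp>

int main()
{
    // File name assumed to match the greater_test() generator in the Python script below
    migraphx::program p = migraphx::parse_onnx("greater_test.onnx");
    // parse_compare_op guarantees a bool output, adding a convert if needed
    auto out = p.get_output_shapes().back();
    return out.type() == migraphx::shape::bool_type ? 0 : 1;
}
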
@@ -37,8 +37,10 @@ add_library(migraphx_device
device/floor.cpp
device/gather.cpp
device/gelu.cpp
+device/greater.cpp
device/int8_gemm_pack.cpp
device/layernorm.cpp
+device/less.cpp
device/log.cpp
device/logsoftmax.cpp
device/max.cpp

@@ -158,6 +160,8 @@ register_migraphx_gpu_ops(hip_
exp
floor
gather
+greater
+less
log
logsoftmax
max
...
#include <migraphx/gpu/device/greater.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/type_traits.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void greater(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2)
{
nary(stream, result, arg1, arg2)([](auto x, auto y) __device__ { return x > y; });
}
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/device/less.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/type_traits.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void less(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2)
{
nary(stream, result, arg1, arg2)([](auto x, auto y) __device__ { return x < y; });
}
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_GREATER_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_GREATER_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void greater(hipStream_t stream,
const argument& result,
const argument& arg1,
const argument& arg2);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_LESS_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_LESS_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void less(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_GREATER_HPP
#define MIGRAPHX_GUARD_RTGLIB_GREATER_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/greater.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_greater : binary_device<hip_greater, device::greater>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_LESS_HPP
#define MIGRAPHX_GUARD_RTGLIB_LESS_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/less.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_less : binary_device<hip_less, device::less>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -24,9 +24,11 @@
#include <migraphx/gpu/elu.hpp>
#include <migraphx/gpu/equal.hpp>
#include <migraphx/gpu/gemm.hpp>
+#include <migraphx/gpu/greater.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp>
#include <migraphx/gpu/leaky_relu.hpp>
+#include <migraphx/gpu/less.hpp>
#include <migraphx/gpu/lrn.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/quant_convolution.hpp>

@@ -106,6 +108,8 @@ struct miopen_apply
add_generic_op("erf");
add_generic_op("exp");
add_generic_op("floor");
+add_generic_op("greater");
+add_generic_op("less");
add_generic_op("log");
add_generic_op("max");
add_generic_op("min");
...
@@ -3006,4 +3006,86 @@ TEST_CASE(equal_brcst_test)
EXPECT(results_vector == gold);
}
TEST_CASE(greater_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {9}};
auto l0 =
p.add_literal(migraphx::literal{s, {1.1, 1.5, 0.1, -1.1, -1.5, -0.6, 0.0, 2.0, -2.0}});
auto l1 =
p.add_literal(migraphx::literal{s, {1.1, 1.6, -0.1, -1.2, -1.5, -0.7, 0.0, 2.3, -2.1}});
auto gr = p.add_instruction(migraphx::op::greater{}, l0, l1);
auto r = p.add_instruction(migraphx::op::convert{migraphx::shape::bool_type}, gr);
p.add_return({r});
p.compile(migraphx::cpu::target{});
auto result = p.eval({}).back();
std::vector<bool> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<bool> gold = {false, false, true, true, false, true, false, false, true};
EXPECT(results_vector == gold);
}
TEST_CASE(greater_brcst_test)
{
migraphx::program p;
migraphx::shape s0{migraphx::shape::float_type, {3, 3}};
auto l0 =
p.add_literal(migraphx::literal{s0, {1.1, 1.5, 0.1, -1.1, -1.5, -0.6, 0.0, 2.0, -2.0}});
migraphx::shape s1{migraphx::shape::float_type, {3, 1}};
auto l1 = p.add_literal(migraphx::literal{s1, {1.1, -1.5, 0.0}});
auto bl1 = p.add_instruction(migraphx::op::multibroadcast{{3, 3}}, l1);
auto gr = p.add_instruction(migraphx::op::greater{}, l0, bl1);
auto r = p.add_instruction(migraphx::op::convert{migraphx::shape::bool_type}, gr);
p.add_return({r});
p.compile(migraphx::cpu::target{});
auto result = p.eval({}).back();
std::vector<bool> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<bool> gold = {false, true, false, true, false, true, false, true, false};
EXPECT(results_vector == gold);
}
TEST_CASE(less_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {9}};
auto l0 =
p.add_literal(migraphx::literal{s, {1.1, 1.5, 0.1, -1.1, -1.5, -0.6, 0.0, 2.0, -2.0}});
auto l1 =
p.add_literal(migraphx::literal{s, {1.1, 1.6, -0.1, -1.2, -1.5, -0.7, 0.0, 2.3, -2.1}});
auto le = p.add_instruction(migraphx::op::less{}, l0, l1);
auto r = p.add_instruction(migraphx::op::convert{migraphx::shape::bool_type}, le);
p.add_return({r});
p.compile(migraphx::cpu::target{});
auto result = p.eval({}).back();
std::vector<bool> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<bool> gold = {false, true, false, false, false, false, false, true, false};
EXPECT(results_vector == gold);
}
TEST_CASE(less_brcst_test)
{
migraphx::program p;
migraphx::shape s0{migraphx::shape::float_type, {3, 3}};
auto l0 =
p.add_literal(migraphx::literal{s0, {1.1, 1.5, 0.1, -1.1, -1.5, -0.6, 0.0, 2.0, -2.0}});
migraphx::shape s1{migraphx::shape::float_type, {3, 1}};
auto l1 = p.add_literal(migraphx::literal{s1, {1.1, -1.5, 0.0}});
auto bl1 = p.add_instruction(migraphx::op::multibroadcast{{3, 3}}, l1);
auto le = p.add_instruction(migraphx::op::less{}, l0, bl1);
auto r = p.add_instruction(migraphx::op::convert{migraphx::shape::bool_type}, le);
p.add_return({r});
p.compile(migraphx::cpu::target{});
auto result = p.eval({}).back();
std::vector<bool> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<bool> gold = {false, false, true, false, false, false, false, false, true};
EXPECT(results_vector == gold);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -2914,4 +2914,68 @@ struct test_equal_brcst : verify_program<test_equal_brcst>
};
};
struct test_greater : verify_program<test_greater>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::double_type, {2, 3, 4, 6}};
auto input1 = p.add_parameter("x", s);
auto input2 = p.add_parameter("y", s);
auto r = p.add_instruction(migraphx::op::greater{}, input1, input2);
p.add_return({r});
return p;
};
};
struct test_greater_brcst : verify_program<test_greater_brcst>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s0{migraphx::shape::float_type, {3, 3}};
auto l0 = p.add_parameter("x", s0);
migraphx::shape s1{migraphx::shape::float_type, {3, 1}};
auto l1 = p.add_parameter("y", s1);
auto bl1 = p.add_instruction(migraphx::op::multibroadcast{s0.lens()}, l1);
auto r = p.add_instruction(migraphx::op::greater{}, l0, bl1);
p.add_return({r});
return p;
};
};
struct test_less : verify_program<test_less>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::double_type, {2, 3, 4, 6}};
auto input1 = p.add_parameter("x", s);
auto input2 = p.add_parameter("y", s);
auto r = p.add_instruction(migraphx::op::less{}, input1, input2);
p.add_return({r});
return p;
};
};
struct test_less_brcst : verify_program<test_less_brcst>
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s0{migraphx::shape::float_type, {3, 3}};
auto l0 = p.add_parameter("x", s0);
migraphx::shape s1{migraphx::shape::float_type, {3, 1}};
auto l1 = p.add_parameter("y", s1);
auto bl1 = p.add_instruction(migraphx::op::multibroadcast{s0.lens()}, l1);
auto r = p.add_instruction(migraphx::op::less{}, l0, bl1);
p.add_return({r});
return p;
};
};
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -1173,6 +1173,82 @@ def equal_bool_test():
    return ([node1, node2], [x1, x2], [y])
@onnx_test
def greater_test():
    ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    x1 = helper.make_tensor("x1",
                            data_type=TensorProto.FLOAT,
                            dims=(2, 3),
                            vals=ax1.astype(np.float32))
    x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])

    node = onnx.helper.make_node(
        'Greater',
        inputs=['x1', 'x2'],
        outputs=['y'],
    )

    return ([node], [x2], [y], [x1])


@onnx_test
def greater_bool_test():
    x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
    x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])

    node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
    node2 = onnx.helper.make_node(
        'Greater',
        inputs=['bx1', 'x2'],
        outputs=['y'],
    )

    return ([node1, node2], [x1, x2], [y])


@onnx_test
def less_test():
    ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    x1 = helper.make_tensor("x1",
                            data_type=TensorProto.FLOAT,
                            dims=(2, 3),
                            vals=ax1.astype(np.float32))
    x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])

    node = onnx.helper.make_node(
        'Less',
        inputs=['x1', 'x2'],
        outputs=['y'],
    )

    return ([node], [x2], [y], [x1])


@onnx_test
def less_bool_test():
    x1 = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [2, 3])
    x2 = helper.make_tensor_value_info('x2', TensorProto.BOOL, [2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])

    node1 = onnx.helper.make_node('Cast', inputs=['x1'], outputs=['bx1'], to=9)
    node2 = onnx.helper.make_node(
        'Less',
        inputs=['bx1', 'x2'],
        outputs=['y'],
    )

    return ([node1, node2], [x1, x2], [y])
@onnx_test
def erf_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
...
greater_bool_test (binary ONNX protobuf: a Cast node feeding a Greater node; inputs x1, x2, output y)
less_bool_test (binary ONNX protobuf: a Cast node feeding a Less node; inputs x1, x2, output y)