Unverified Commit 4d46cbdb authored by Shucai Xiao's avatar Shucai Xiao Committed by GitHub
Browse files

Logical ops (#718)

* add the and operator

* clang format

* add unit tests for the and operator

* clang format

* rename the and operator to logical_and, and add logical_or and logical_xor operators

* clang format

* add onnx unit tests for or and xor

* add more unit tests
parent 62a1b87b
...@@ -98,6 +98,9 @@ register_migraphx_ops( ...@@ -98,6 +98,9 @@ register_migraphx_ops(
less less
load load
log log
logical_and
logical_or
logical_xor
logsoftmax logsoftmax
lrn lrn
lstm lstm
......
#ifndef MIGRAPHX_GUARD_OPERATORS_LOGICAL_AND_HPP
#define MIGRAPHX_GUARD_OPERATORS_LOGICAL_AND_HPP

#include <migraphx/op/binary.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/config.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {

// Elementwise logical AND operator. The binary<> base (CRTP) is expected to
// provide the shape computation and elementwise evaluation; this class only
// supplies the per-element functor.
struct logical_and : binary<logical_and>
{
    // Per-element operation: any nonzero operand counts as true.
    auto apply() const
    {
        return [](auto a, auto b) { return static_cast<bool>(a) && static_cast<bool>(b); };
    }
};

} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_OPERATORS_LOGICAL_OR_HPP
#define MIGRAPHX_GUARD_OPERATORS_LOGICAL_OR_HPP

#include <migraphx/op/binary.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/config.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {

// Elementwise logical OR operator. The binary<> base (CRTP) is expected to
// provide the shape computation and elementwise evaluation; this class only
// supplies the per-element functor.
struct logical_or : binary<logical_or>
{
    // Per-element operation: any nonzero operand counts as true.
    auto apply() const
    {
        return [](auto a, auto b) { return static_cast<bool>(a) || static_cast<bool>(b); };
    }
};

} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_OPERATORS_LOGICAL_XOR_HPP
#define MIGRAPHX_GUARD_OPERATORS_LOGICAL_XOR_HPP

#include <migraphx/op/binary.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/config.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {

// Elementwise logical XOR operator. The binary<> base (CRTP) is expected to
// provide the shape computation and elementwise evaluation; this class only
// supplies the per-element functor.
struct logical_xor : binary<logical_xor>
{
    // Per-element operation: operands are truth-tested, then exclusive-or'd.
    // `^` (same token as the original `xor`) keeps the integral-promotion
    // result type identical to the original implementation.
    auto apply() const
    {
        return [](auto a, auto b) { return static_cast<bool>(a) ^ static_cast<bool>(b); };
    }
};

} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
...@@ -43,6 +43,9 @@ ...@@ -43,6 +43,9 @@
#include <migraphx/op/less.hpp> #include <migraphx/op/less.hpp>
#include <migraphx/op/load.hpp> #include <migraphx/op/load.hpp>
#include <migraphx/op/log.hpp> #include <migraphx/op/log.hpp>
#include <migraphx/op/logical_and.hpp>
#include <migraphx/op/logical_or.hpp>
#include <migraphx/op/logical_xor.hpp>
#include <migraphx/op/logsoftmax.hpp> #include <migraphx/op/logsoftmax.hpp>
#include <migraphx/op/lrn.hpp> #include <migraphx/op/lrn.hpp>
#include <migraphx/op/lstm.hpp> #include <migraphx/op/lstm.hpp>
......
...@@ -11,7 +11,14 @@ struct parse_binary_op : op_parser<parse_binary_op> ...@@ -11,7 +11,14 @@ struct parse_binary_op : op_parser<parse_binary_op>
{ {
std::vector<op_desc> operators() const std::vector<op_desc> operators() const
{ {
return {{"Add", "add"}, {"Div", "div"}, {"Mul", "mul"}, {"PRelu", "prelu"}, {"Sub", "sub"}}; return {{"Add", "add"},
{"Div", "div"},
{"And", "logical_and"},
{"Or", "logical_or"},
{"Xor", "logical_xor"},
{"Mul", "mul"},
{"PRelu", "prelu"},
{"Sub", "sub"}};
} }
instruction_ref parse(const op_desc& opd, instruction_ref parse(const op_desc& opd,
......
...@@ -42,6 +42,9 @@ add_library(migraphx_device ...@@ -42,6 +42,9 @@ add_library(migraphx_device
device/layernorm.cpp device/layernorm.cpp
device/less.cpp device/less.cpp
device/log.cpp device/log.cpp
device/logical_and.cpp
device/logical_or.cpp
device/logical_xor.cpp
device/logsoftmax.cpp device/logsoftmax.cpp
device/max.cpp device/max.cpp
device/min.cpp device/min.cpp
...@@ -167,6 +170,9 @@ register_migraphx_gpu_ops(hip_ ...@@ -167,6 +170,9 @@ register_migraphx_gpu_ops(hip_
less less
log log
logsoftmax logsoftmax
logical_and
logical_or
logical_xor
max max
min min
mul mul
......
#include <migraphx/gpu/device/logical_and.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/type_traits.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

// Elementwise logical AND on the GPU: result = bool(arg1) && bool(arg2),
// launched via the generic nary kernel on the given HIP stream.
void logical_and(hipStream_t stream,
                 const argument& result,
                 const argument& arg1,
                 const argument& arg2)
{
    auto and_op = [](auto a, auto b) __device__ {
        return static_cast<bool>(a) && static_cast<bool>(b);
    };
    nary(stream, result, arg1, arg2)(and_op);
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/device/logical_or.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/type_traits.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

// Elementwise logical OR on the GPU: result = bool(arg1) || bool(arg2),
// launched via the generic nary kernel on the given HIP stream.
void logical_or(hipStream_t stream,
                const argument& result,
                const argument& arg1,
                const argument& arg2)
{
    auto or_op = [](auto a, auto b) __device__ {
        return static_cast<bool>(a) || static_cast<bool>(b);
    };
    nary(stream, result, arg1, arg2)(or_op);
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/device/logical_xor.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/type_traits.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

// Elementwise logical XOR on the GPU: operands are truth-tested and
// exclusive-or'd (`^` is the same token as the original `xor`), launched via
// the generic nary kernel on the given HIP stream.
void logical_xor(hipStream_t stream,
                 const argument& result,
                 const argument& arg1,
                 const argument& arg2)
{
    auto xor_op = [](auto a, auto b) __device__ {
        return static_cast<bool>(a) ^ static_cast<bool>(b);
    };
    nary(stream, result, arg1, arg2)(xor_op);
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_LOGICAL_AND_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_LOGICAL_AND_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
// Elementwise logical AND on the GPU: writes bool(arg1) && bool(arg2) into
// result, using the given HIP stream. Defined in device/logical_and.cpp.
void logical_and(hipStream_t stream,
                const argument& result,
                const argument& arg1,
                const argument& arg2);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_LOGICAL_OR_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_LOGICAL_OR_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
// Elementwise logical OR on the GPU: writes bool(arg1) || bool(arg2) into
// result, using the given HIP stream. Defined in device/logical_or.cpp.
void logical_or(hipStream_t stream,
                const argument& result,
                const argument& arg1,
                const argument& arg2);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_LOGICAL_XOR_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_LOGICAL_XOR_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
// Elementwise logical XOR on the GPU: truth-tests both arguments and writes
// their exclusive-or into result, using the given HIP stream. Defined in
// device/logical_xor.cpp.
void logical_xor(hipStream_t stream,
                const argument& result,
                const argument& arg1,
                const argument& arg2);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
// Fix: include guard was misspelled MIGRAPHX_GUARD_RTGLIB_LOGICLA_AND_HPP,
// breaking the LOGICAL_* naming used by every sibling header in this change.
#ifndef MIGRAPHX_GUARD_RTGLIB_LOGICAL_AND_HPP
#define MIGRAPHX_GUARD_RTGLIB_LOGICAL_AND_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/logical_and.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// GPU operation wrapper dispatching to device::logical_and; binary_device
// supplies the operation boilerplate via CRTP.
struct hip_logical_and : binary_device<hip_logical_and, device::logical_and>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_LOGICAL_OR_HPP
#define MIGRAPHX_GUARD_RTGLIB_LOGICAL_OR_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/logical_or.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// GPU operation wrapper dispatching to device::logical_or; binary_device
// supplies the operation boilerplate via CRTP.
struct hip_logical_or : binary_device<hip_logical_or, device::logical_or>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_LOGICAL_XOR_HPP
#define MIGRAPHX_GUARD_RTGLIB_LOGICAL_XOR_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/logical_xor.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// GPU operation wrapper dispatching to device::logical_xor; binary_device
// supplies the operation boilerplate via CRTP.
struct hip_logical_xor : binary_device<hip_logical_xor, device::logical_xor>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
...@@ -29,6 +29,9 @@ ...@@ -29,6 +29,9 @@
#include <migraphx/gpu/int8_conv_pack.hpp> #include <migraphx/gpu/int8_conv_pack.hpp>
#include <migraphx/gpu/leaky_relu.hpp> #include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/less.hpp> #include <migraphx/gpu/less.hpp>
#include <migraphx/gpu/logical_and.hpp>
#include <migraphx/gpu/logical_or.hpp>
#include <migraphx/gpu/logical_xor.hpp>
#include <migraphx/gpu/lrn.hpp> #include <migraphx/gpu/lrn.hpp>
#include <migraphx/gpu/miopen.hpp> #include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/quant_convolution.hpp> #include <migraphx/gpu/quant_convolution.hpp>
...@@ -112,6 +115,9 @@ struct miopen_apply ...@@ -112,6 +115,9 @@ struct miopen_apply
add_generic_op("greater"); add_generic_op("greater");
add_generic_op("less"); add_generic_op("less");
add_generic_op("log"); add_generic_op("log");
add_generic_op("logical_and");
add_generic_op("logical_or");
add_generic_op("logical_xor");
add_generic_op("max"); add_generic_op("max");
add_generic_op("min"); add_generic_op("min");
add_generic_op("mul"); add_generic_op("mul");
......
...@@ -1775,6 +1775,39 @@ def log_test(): ...@@ -1775,6 +1775,39 @@ def log_test():
return ([node], [x], [y]) return ([node], [x], [y])
@onnx_test
def logical_and_bcast_test():
    """ONNX And node whose second input broadcasts from [4, 5] to [2, 3, 4, 5]."""
    lhs = helper.make_tensor_value_info('0', TensorProto.BOOL, [2, 3, 4, 5])
    rhs = helper.make_tensor_value_info('1', TensorProto.BOOL, [4, 5])
    out = helper.make_tensor_value_info('2', TensorProto.BOOL, [2, 3, 4, 5])
    and_node = onnx.helper.make_node('And', inputs=['0', '1'], outputs=['2'])
    return ([and_node], [lhs, rhs], [out])
@onnx_test
def logical_or_test():
    """ONNX Or node with two same-shaped [2, 3, 4, 5] bool inputs."""
    lhs = helper.make_tensor_value_info('0', TensorProto.BOOL, [2, 3, 4, 5])
    rhs = helper.make_tensor_value_info('1', TensorProto.BOOL, [2, 3, 4, 5])
    out = helper.make_tensor_value_info('2', TensorProto.BOOL, [2, 3, 4, 5])
    or_node = onnx.helper.make_node('Or', inputs=['0', '1'], outputs=['2'])
    return ([or_node], [lhs, rhs], [out])
@onnx_test
def logical_xor_bcast_test():
    """ONNX Xor node whose second input broadcasts from [4, 1] to [2, 3, 4, 5]."""
    lhs = helper.make_tensor_value_info('0', TensorProto.BOOL, [2, 3, 4, 5])
    rhs = helper.make_tensor_value_info('1', TensorProto.BOOL, [4, 1])
    out = helper.make_tensor_value_info('2', TensorProto.BOOL, [2, 3, 4, 5])
    xor_node = onnx.helper.make_node('Xor', inputs=['0', '1'], outputs=['2'])
    return ([xor_node], [lhs, rhs], [out])
@onnx_test @onnx_test
def logsoftmax_test(): def logsoftmax_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
......
logical_and_bcast_test:w

0
12"Andlogical_and_bcast_testZ
0
 



Z
1
 

b
2
 



B
\ No newline at end of file
logical_or_test:w
0
12"Orlogical_or_testZ
0
 



Z
1
 



b
2
 



B
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment