Commit e2eb6036 authored by Paul

Merge

parents 298c93d5 1e0bbd78
......@@ -10,7 +10,7 @@
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <migraphx/gpu/compile_hip_code_object.hpp>
#include <migraphx/gpu/compile_pointwise.hpp>
#include <migraphx/gpu/compiler.hpp>
// NOLINTNEXTLINE
const std::string write_2s = R"__migraphx__(
......@@ -230,7 +230,8 @@ TEST_CASE(compile_pointwise)
migraphx::shape input{migraphx::shape::float_type, {5, 2}};
migraphx::gpu::context ctx;
auto co = migraphx::gpu::compile_pointwise(ctx, {input, input}, "[](auto x) { return x + 1; }");
auto co = migraphx::gpu::compile_op(
"pointwise", ctx, {input, input}, {{"lambda", "[](auto x) { return x + 1; }"}});
migraphx::program p;
auto* mm = p.get_main_module();
......
......@@ -68,9 +68,9 @@ struct nop
{
static std::string as_string() { return ""; }
template <class T>
static decltype(auto) call(T&& x)
static auto call(T&& x)
{
return x;
return static_cast<T&&>(x);
}
};
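Note: nop::call changes its return type from decltype(auto) to auto here. With decltype(auto) the function returned T&&, i.e. a reference to its own argument, which can dangle when the operand is a temporary; with auto plus static_cast<T&&>(x) the result decays to a value, so rvalue arguments are moved and lvalues are copied. A minimal standalone sketch of the difference (hypothetical names, not part of test.hpp):

#include <string>

template <class T>
decltype(auto) call_by_ref(T&& x)
{
    return x; // deduces T&&: a reference to the caller's argument
}

template <class T>
auto call_by_value(T&& x)
{
    return static_cast<T&&>(x); // auto decays to a value; rvalue arguments are moved
}

int main()
{
    std::string s = "hello";
    auto copied = call_by_value(s);                    // copy of s
    auto moved  = call_by_value(std::string{"world"}); // moved from the temporary
    // const std::string& bad = call_by_ref(std::string{"oops"}); // would dangle
    return (copied == "hello" && moved == "world") ? 0 : 1;
}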
......@@ -113,6 +113,33 @@ inline auto operator<<(Stream& s, const Range& v) -> decltype(stream_range(s, v.
return s;
}
template <class T>
const T& get_value(const T& x)
{
return x;
}
template <class T, class Operator = nop>
struct lhs_expression;
template <class T>
lhs_expression<T> make_lhs_expression(T&& lhs);
template <class T, class Operator>
lhs_expression<T, Operator> make_lhs_expression(T&& lhs, Operator);
// NOLINTNEXTLINE
#define TEST_EXPR_BINARY_OPERATOR(op, name) \
template <class V> \
auto operator op(const V& rhs2) const \
{ \
return make_expression(*this, rhs2, name{}); /* NOLINT */ \
}
// NOLINTNEXTLINE
#define TEST_EXPR_UNARY_OPERATOR(op, name) \
auto operator op() const { return make_lhs_expression(lhs, name{}); /* NOLINT */ }
template <class T, class U, class Operator>
struct expression
{
......@@ -125,7 +152,12 @@ struct expression
return s;
}
decltype(auto) value() const { return Operator::call(lhs, rhs); };
friend decltype(auto) get_value(const expression& e) { return e.value(); }
decltype(auto) value() const { return Operator::call(get_value(lhs), get_value(rhs)); };
TEST_FOREACH_UNARY_OPERATORS(TEST_EXPR_UNARY_OPERATOR)
TEST_FOREACH_BINARY_OPERATORS(TEST_EXPR_BINARY_OPERATOR)
};
// TODO: Remove rvalue references
......@@ -135,9 +167,6 @@ expression<T, U, Operator> make_expression(T&& rhs, U&& lhs, Operator)
return {std::forward<T>(rhs), std::forward<U>(lhs)};
}
template <class T, class Operator = nop>
struct lhs_expression;
// TODO: Remove rvalue reference
template <class T>
lhs_expression<T> make_lhs_expression(T&& lhs)
......@@ -166,22 +195,12 @@ struct lhs_expression
return s;
}
decltype(auto) value() const { return Operator::call(lhs); }
// NOLINTNEXTLINE
#define TEST_LHS_BINARY_OPERATOR(op, name) \
template <class U> \
auto operator op(const U& rhs) const \
{ \
return make_expression(lhs, rhs, name{}); /* NOLINT */ \
}
friend decltype(auto) get_value(const lhs_expression& e) { return e.value(); }
TEST_FOREACH_BINARY_OPERATORS(TEST_LHS_BINARY_OPERATOR)
decltype(auto) value() const { return Operator::call(get_value(lhs)); }
// NOLINTNEXTLINE
#define TEST_LHS_UNARY_OPERATOR(op, name) \
auto operator op() const { return make_lhs_expression(lhs, name{}); /* NOLINT */ }
TEST_FOREACH_UNARY_OPERATORS(TEST_LHS_UNARY_OPERATOR)
TEST_FOREACH_BINARY_OPERATORS(TEST_EXPR_BINARY_OPERATOR)
TEST_FOREACH_UNARY_OPERATORS(TEST_EXPR_UNARY_OPERATOR)
// NOLINTNEXTLINE
#define TEST_LHS_REOPERATOR(op) \
......@@ -197,7 +216,7 @@ struct lhs_expression
TEST_LHS_REOPERATOR(%)
TEST_LHS_REOPERATOR(&)
TEST_LHS_REOPERATOR(|)
TEST_LHS_REOPERATOR (^)
TEST_LHS_REOPERATOR(^)
};
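The shared operator macros and the friend get_value overloads above let one expression appear as the operand of another: value() calls get_value on each side, which falls back to the identity overload for ordinary values and dispatches to the friend for nested expressions. A simplified standalone sketch of that recursion (hypothetical types, not the framework's exact expansion):

#include <iostream>

template <class T>
const T& get_value(const T& x) { return x; } // plain values pass through unchanged

template <class T, class U>
struct eq_expr
{
    T lhs;
    U rhs;
    auto value() const { return get_value(lhs) == get_value(rhs); }
    // nested expressions evaluate themselves when used as an operand
    friend auto get_value(const eq_expr& e) { return e.value(); }
};

template <class T, class U>
eq_expr<T, U> make_eq(T lhs, U rhs) { return {lhs, rhs}; }

int main()
{
    auto inner = make_eq(2, 2);        // evaluates to true
    auto outer = make_eq(inner, true); // get_value(inner) -> inner.value() -> true
    std::cout << std::boolalpha << outer.value() << '\n'; // prints true
    return outer.value() ? 0 : 1;
}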
template <class F>
......@@ -223,6 +242,13 @@ auto make_predicate(const std::string& msg, F f)
return make_lhs_expression(predicate<F>{msg, f}, function{});
}
inline std::string as_string(bool x)
{
if(x)
return "true";
return "false";
}
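as_string gains a bool overload because the generic overload below presumably streams the value, and streaming a bool without std::boolalpha prints 1 or 0; the overload makes failure messages read true/false instead. A standalone illustration of the default stream behaviour:

#include <iostream>
#include <sstream>

int main()
{
    std::ostringstream ss;
    ss << true;                    // bools stream as integers by default
    std::cout << ss.str() << '\n'; // prints "1", not "true"
    return 0;
}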
template <class T>
std::string as_string(const T& x)
{
......@@ -627,18 +653,21 @@ inline void run(int argc, const char* argv[])
} // namespace test
// NOLINTNEXTLINE
#define TEST_CAPTURE(...) test::capture{}->*__VA_ARGS__
// NOLINTNEXTLINE
#define CHECK(...) \
test::failed( \
test::capture{}->*__VA_ARGS__, #__VA_ARGS__, __PRETTY_FUNCTION__, __FILE__, __LINE__, [] { \
})
// NOLINTNEXTLINE
#define EXPECT(...) \
test::failed(test::capture{}->*__VA_ARGS__, \
#__VA_ARGS__, \
__PRETTY_FUNCTION__, \
__FILE__, \
__LINE__, \
#define EXPECT(...) \
test::failed(TEST_CAPTURE(__VA_ARGS__), \
#__VA_ARGS__, \
__PRETTY_FUNCTION__, \
__FILE__, \
__LINE__, \
&test::fail)
// NOLINTNEXTLINE
#define STATUS(...) EXPECT((__VA_ARGS__) == 0)
......
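EXPECT is rewritten in terms of TEST_CAPTURE so that it and CHECK wrap the tested expression the same way; judging by the handlers passed above, CHECK reports through an empty handler while EXPECT reports through test::fail. A hedged usage sketch of the macros (assumes the usual test.hpp setup; only names visible in this diff are used):

#include "test.hpp" // assumed include path

TEST_CASE(expression_capture)
{
    int x = 2;
    EXPECT(x + 1 == 3); // failure handler is test::fail
    CHECK(x * 2 == 4);  // failure handler is the empty lambda in the macro above
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }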
......@@ -55,7 +55,9 @@ TEST_CASE(rewrite_pad)
auto l0 = create_im2col(l_img, channels, m);
auto l1 = create_conv(l_img, channels, m);
auto l2 = m.add_instruction(
migraphx::make_op("pooling", {{"mode", "max"}, {"padding", {0, 0, 1, 1}}}), l_img);
migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max}, {"padding", {0, 0, 1, 1}}}),
l_img);
m.add_instruction(migraphx::make_op("identity"), l0, l1, l2);
run_pass(m);
......@@ -76,8 +78,10 @@ TEST_CASE(rewrite_pad_symmetric)
migraphx::shape s_img{migraphx::shape::int32_type, {1, channels, img_dim[0], img_dim[1]}};
auto l_img = m.add_literal(migraphx::literal{s_img, input});
m.add_instruction(migraphx::make_op("pooling", {{"mode", "max"}, {"padding", {1, 1, 1, 1}}}),
l_img);
m.add_instruction(
migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max}, {"padding", {1, 1, 1, 1}}}),
l_img);
run_pass(m);
EXPECT(std::none_of(
......
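The pooling tests above now pass the mode as the migraphx::op::pooling_mode enum instead of the string "max". A small sketch of constructing the same operation outside a test, using only the call visible in the diff (header paths and the free function name are assumptions):

#include <migraphx/make_op.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/op/pooling.hpp>

migraphx::operation make_max_pool_with_padding()
{
    // "mode" now takes the pooling_mode enum rather than the string "max"
    return migraphx::make_op(
        "pooling", {{"mode", migraphx::op::pooling_mode::max}, {"padding", {0, 0, 1, 1}}});
}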
[Binary ONNX protobuf test files added (content not displayable as text): celu_alpha_test, celu_default_test, celu_wrong_type_test, eyelike_default_test, eyelike_double_test, eyelike_half_test, eyelike_k_outofbounds_neg_test, eyelike_k_outofbounds_pos_test, eyelike_k_test, eyelike_not_rank2_test, eyelike_set_dtype_test, eyelike_verify_negk_test, eyelike_verify_test]
......@@ -351,6 +351,65 @@ def ceil_test():
return ([node], [x], [y])
@onnx_test
def celu_alpha_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
node = onnx.helper.make_node('Celu',
inputs=['x'],
outputs=['y'],
alpha=0.8)
return ([node], [x], [y])
@onnx_test
def celu_default_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node('Celu', inputs=['x'], outputs=['y'])
return ([node], [x], [y])
@onnx_test
def celu_verify_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node('Celu',
inputs=['x'],
outputs=['y'],
alpha=0.5)
return ([node], [x], [y])
@onnx_test
def celu_wrong_type_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [2, 3])
node = onnx.helper.make_node('Celu', inputs=['x'], outputs=['y'])
return ([node], [x], [y])
@onnx_test
def celu_zero_alpha_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node('Celu',
inputs=['x'],
outputs=['y'],
alpha=0.0)
return ([node], [x], [y])
@onnx_test
def clip_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
......@@ -426,6 +485,22 @@ def clip_test_op11_no_args1():
return ([node], [x], [y])
@onnx_test
def clip_test_args_type_mismatch():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 3])
min_val = helper.make_tensor('min', TensorProto.FLOAT, [1, 3],
[1.5, 2.5, 3.5])
max_val = helper.make_tensor('max', TensorProto.INT64, [3, 1], [2, 3, 4])
node = onnx.helper.make_node('Clip',
inputs=['0', 'min', 'max'],
outputs=['1'])
return ([node], [x], [y], [min_val, max_val])
@onnx_test
def concat_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 4, 3])
......@@ -1381,6 +1456,114 @@ def expand_test():
return ([shape_const, node], [x], [y])
@onnx_test
def eyelike_default_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node(
'EyeLike',
inputs=['T1'],
outputs=['T2'],
)
return ([node], [T1], [T2])
@onnx_test
def eyelike_double_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.DOUBLE, [6, 15])
T2 = helper.make_tensor_value_info('T2', TensorProto.DOUBLE, [6, 15])
node = onnx.helper.make_node(
'EyeLike',
inputs=['T1'],
outputs=['T2'],
)
return ([node], [T1], [T2])
@onnx_test
def eyelike_half_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT16, [8, 8])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT16, [8, 8])
node = onnx.helper.make_node(
'EyeLike',
inputs=['T1'],
outputs=['T2'],
)
return ([node], [T1], [T2])
@onnx_test
def eyelike_k_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'], k=1)
return ([node], [T1], [T2])
@onnx_test
def eyelike_k_outofbounds_neg_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [2, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [2, 4])
node = onnx.helper.make_node('EyeLike',
inputs=['T1'],
outputs=['T2'],
k=-2)
return ([node], [T1], [T2])
@onnx_test
def eyelike_k_outofbounds_pos_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'], k=4)
return ([node], [T1], [T2])
@onnx_test
def eyelike_not_rank2_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4, 2])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node(
'EyeLike',
inputs=['T1'],
outputs=['T2'],
)
return ([node], [T1], [T2])
@onnx_test
def eyelike_verify_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node('EyeLike', inputs=['T1'], outputs=['T2'], k=1)
return ([node], [T1], [T2])
@onnx_test
def eyelike_verify_negk_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node('EyeLike',
inputs=['T1'],
outputs=['T2'],
k=-2)
return ([node], [T1], [T2])
@onnx_test
def eyelike_set_dtype_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.DOUBLE, [3, 4])
node = onnx.helper.make_node('EyeLike',
inputs=['T1'],
outputs=['T2'],
dtype=TensorProto.DOUBLE)
return ([node], [T1], [T2])
@onnx_test
def flatten_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
......@@ -2307,6 +2490,32 @@ def instance_norm_val_3d_test():
return ([node], [], [y], [x_tensor, scale_tensor, bias_tensor])
@onnx_test
def isnan_float_test():
t1 = helper.make_tensor_value_info('t1', TensorProto.FLOAT, [2, 3])
t2 = helper.make_tensor_value_info('t2', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'IsNaN',
inputs=['t1'],
outputs=['t2'],
)
return ([node], [t1], [t2])
@onnx_test
def isnan_half_test():
t1 = helper.make_tensor_value_info('t1', TensorProto.FLOAT16, [2, 3])
t2 = helper.make_tensor_value_info('t2', TensorProto.FLOAT16, [2, 3])
node = onnx.helper.make_node(
'IsNaN',
inputs=['t1'],
outputs=['t2'],
)
return ([node], [t1], [t2])
@onnx_test
def layernorm_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 1, 5])
......@@ -2595,6 +2804,70 @@ def loop_test():
return ([node], [iter, cond, a, b], [b_loop, uout])
@onnx_test
def lpnormalization_axis_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node('LpNormalization',
inputs=['x'],
outputs=['y'],
axis=2)
return ([node], [x], [y])
@onnx_test
def lpnormalization_default_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node(
'LpNormalization',
inputs=['x'],
outputs=['y'],
axis=0,
)
return ([node], [x], [y])
@onnx_test
def lpnormalization_l1_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node(
'LpNormalization',
inputs=['x'],
outputs=['y'],
p=1,
)
return ([node], [x], [y])
@onnx_test
def lpnormalization_l2_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node('LpNormalization',
inputs=['x'],
outputs=['y'],
p=2)
return ([node], [x], [y])
@onnx_test
def lpnormalization_p_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node('LpNormalization',
inputs=['x'],
outputs=['y'],
p=3)
return ([node], [x], [y])
@onnx_test
def lrn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 28, 24, 24])
......@@ -4126,6 +4399,59 @@ def scatter_test():
return ([node], [x, i, u], [y])
@onnx_test
def scatternd_add_test():
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2, 2])
indices = helper.make_tensor_value_info('indices', TensorProto.INT64,
[2, 1, 2])
updates = helper.make_tensor_value_info('updates', TensorProto.FLOAT,
[2, 1, 2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 2, 2])
node = onnx.helper.make_node('ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'],
reduction="add")
return ([node], [data, indices, updates], [output])
@onnx_test
def scatternd_mul_test():
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2, 2])
indices = helper.make_tensor_value_info('indices', TensorProto.INT64,
[2, 1, 2])
updates = helper.make_tensor_value_info('updates', TensorProto.FLOAT,
[2, 1, 2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 2, 2])
node = onnx.helper.make_node('ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'],
reduction="mul")
return ([node], [data, indices, updates], [output])
@onnx_test
def scatternd_test():
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2, 2])
indices = helper.make_tensor_value_info('indices', TensorProto.INT64,
[2, 1, 2])
updates = helper.make_tensor_value_info('updates', TensorProto.FLOAT,
[2, 1, 2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 2, 2])
node = onnx.helper.make_node('ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'])
return ([node], [data, indices, updates], [output])
@onnx_test
def selu_test():
x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [2, 3])
......@@ -4231,6 +4557,54 @@ def sinh_test():
return ([node], [x], [y])
@onnx_test
def size_float_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [1])
node = onnx.helper.make_node(
'Size',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def size_half_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [3, 1])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [1])
node = onnx.helper.make_node(
'Size',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def size_int_test():
x = helper.make_tensor_value_info('x', TensorProto.INT32, [8, 2, 3])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [1])
node = onnx.helper.make_node(
'Size',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def size_verify_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5, 3])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [1])
node = onnx.helper.make_node(
'Size',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def slice_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 2])
......