Commit 3a4d36cf authored by charlie's avatar charlie
Browse files

Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_model_test

parents 6bec381f e19f78ae
......@@ -39,8 +39,8 @@ TEST_CASE(literal_test)
migraphx::literal l2 = l1; // NOLINT
EXPECT(l1 == l2);
EXPECT(l1.at<int>(0) == 1);
EXPECT(!l1.empty());
EXPECT(!l2.empty());
EXPECT(not l1.empty());
EXPECT(not l2.empty());
migraphx::literal l3{};
migraphx::literal l4{};
......
......@@ -724,7 +724,7 @@ TEST_CASE(test39)
auto sub_modules = p.get_modules();
std::reverse(sub_modules.begin(), sub_modules.end());
for(auto& smod : sub_modules)
for(const auto& smod : sub_modules)
{
run_pass(*smod);
}
......
batch_norm_1d_test:
7
x
scale
bias
mean
variancey"BatchNormalizationbatch_norm_1d_testZ
x




Z
scale

Z
bias

Z
mean

Z
variance

b
y




B
\ No newline at end of file
batch_norm_2d_test:
7
x
scale
bias
mean
variancey"BatchNormalizationbatch_norm_2d_testZ
x




Z
scale

Z
bias

Z
mean

Z
variance

b
y




B
\ No newline at end of file
batch_norm_3d_test:
J
x
scale
bias
mean
variancey"BatchNormalization*
epsilon75batch_norm_3d_testZ
x






Z
scale


Z
bias


Z
mean


Z
variance


b
y






B
\ No newline at end of file
batch_norm_flat_test:
J
x
scale
bias
mean
variancey"BatchNormalization*
epsilon75batch_norm_flat_testZ
x


Z
scale

Z
bias

Z
mean

Z
variance

b
y


B
\ No newline at end of file
!batch_norm_invalid_bias_rank_test:
7
x
scale
bias
mean
variancey"BatchNormalization!batch_norm_invalid_bias_rank_testZ
x




Z
scale

Z
bias


Z
mean

Z
variance

b
y




B
\ No newline at end of file
batch_norm_invalid_rank_test:
7
x
scale
bias
mean
variancey"BatchNormalizationbatch_norm_invalid_rank_testZ
x


Z
scale

Z
bias

Z
mean

Z
variance

b
y


B
\ No newline at end of file
batchnorm_1d_test:
M
0
1
2
3
45"BatchNormalization*
epsilon75*
momentumfff?batchnorm_1d_testZ
0



Z
1

Z
2

Z
3

Z
4

b
5



B
\ No newline at end of file
batchnorm_3d_test:
M
0
1
2
3
45"BatchNormalization*
epsilon75*
momentumfff?batchnorm_3d_testZ
0





Z
1

Z
2

Z
3

Z
4

b
5





B
\ No newline at end of file
......@@ -314,38 +314,107 @@ def averagepool_same_upper_test():
@onnx_test
def batchnorm_1d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 3, 5])
node = onnx.helper.make_node('BatchNormalization',
inputs=['0', '1', '2', '3', '4'],
outputs=['5'],
epsilon=1e-6,
momentum=0.9)
def batch_norm_flat_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [1])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [1])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'],
epsilon=1e-6)
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batchnorm_3d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5, 5])
scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('5', TensorProto.FLOAT,
[1, 3, 5, 5, 5])
node = onnx.helper.make_node('BatchNormalization',
inputs=['0', '1', '2', '3', '4'],
outputs=['5'],
epsilon=1e-6,
momentum=0.9)
def batch_norm_1d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [2, 3, 4])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [2, 3, 4])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_2d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 4])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_3d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16,
[2, 2, 2, 2, 2])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT16, [2])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT16, [2])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT16, [2])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT16, [2])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT16,
[2, 2, 2, 2, 2])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'],
epsilon=1e-6)
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_invalid_rank_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [8, 8])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [8])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [8])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [8])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [8])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8, 8])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_invalid_bias_rank_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3, 1])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 4])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
......@@ -3589,7 +3658,7 @@ def nms_test():
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[6, 3])
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
......@@ -3603,6 +3672,108 @@ def nms_test():
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_use_dyn_output_false_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, 6, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT, [1, 1, 6])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'],
use_dyn_output=0)
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_dynamic_batch_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [None, 6, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
[None, 1, 6])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'],
center_point_box=1,
use_dyn_output=1)
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_dynamic_boxes_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, None, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
[1, 1, None])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'])
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_dynamic_classes_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, 6, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
[1, None, 6])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'])
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def not_test():
x = helper.make_tensor_value_info('0', TensorProto.INT32, [4])
......
No preview for this file type
......@@ -38,7 +38,6 @@
#include <migraphx/onnx.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/pad.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/op/lrn.hpp>
#include <migraphx/op/reshape.hpp>
......@@ -370,36 +369,134 @@ TEST_CASE(averagepool_same_upper_test)
EXPECT(p == prog);
}
TEST_CASE(batchnorm_1d_test)
TEST_CASE(batch_norm_flat_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {3}});
auto l2 = mm->add_parameter("2", {migraphx::shape::float_type, {3}});
auto l3 = mm->add_parameter("3", {migraphx::shape::float_type, {3}});
auto l4 = mm->add_parameter("4", {migraphx::shape::float_type, {3}});
mm->add_instruction(migraphx::make_op("batch_norm_inference"), l0, l1, l2, l3, l4);
auto prog = optimize_onnx("batchnorm_1d_test.onnx");
auto x = mm->add_parameter("x", {migraphx::shape::float_type, {10}});
auto scale = mm->add_parameter("scale", {migraphx::shape::float_type, {1}});
auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {1}});
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {1}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {1}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, bias});
auto prog = optimize_onnx("batch_norm_flat_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(batchnorm_3d_test)
TEST_CASE(batch_norm_1d_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5, 5}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {3}});
auto l2 = mm->add_parameter("2", {migraphx::shape::float_type, {3}});
auto l3 = mm->add_parameter("3", {migraphx::shape::float_type, {3}});
auto l4 = mm->add_parameter("4", {migraphx::shape::float_type, {3}});
mm->add_instruction(migraphx::make_op("batch_norm_inference"), l0, l1, l2, l3, l4);
auto prog = optimize_onnx("batchnorm_3d_test.onnx");
auto x = mm->add_parameter("x", {migraphx::shape::half_type, {2, 3, 4}});
auto scale = mm->add_parameter("scale", {migraphx::shape::float_type, {3}});
auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {3}});
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {3}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {3}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-5f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), scale);
auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), bias);
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_onnx("batch_norm_1d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(batch_norm_2d_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", {migraphx::shape::float_type, {2, 3, 4, 4}});
auto scale = mm->add_parameter("scale", {migraphx::shape::float_type, {3}});
auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {3}});
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {3}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {3}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_onnx("batch_norm_2d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(batch_norm_3d_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", {migraphx::shape::half_type, {2, 2, 2, 2, 2}});
auto scale = mm->add_parameter("scale", {migraphx::shape::half_type, {2}});
auto bias = mm->add_parameter("bias", {migraphx::shape::half_type, {2}});
auto mean = mm->add_parameter("mean", {migraphx::shape::half_type, {2}});
auto var = mm->add_parameter("variance", {migraphx::shape::half_type, {2}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-6f}});
auto usq_scale =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), scale);
auto usq_bias =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), bias);
auto usq_mean =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_onnx("batch_norm_3d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(batch_norm_invalid_rank)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("batch_norm_invalid_rank.onnx"); }));
}
TEST_CASE(batch_norm_invalid_bias_rank)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("batch_norm_invalid_bias_rank.onnx"); }));
}
TEST_CASE(cast_test)
{
migraphx::program p;
......@@ -792,18 +889,46 @@ TEST_CASE(conv_bn_relu_maxpool_test)
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l2 = mm->add_parameter("2", {migraphx::shape::float_type, {1}});
auto p3 = mm->add_parameter("3", {migraphx::shape::float_type, {1}});
auto p4 = mm->add_parameter("4", {migraphx::shape::float_type, {1}});
auto p5 = mm->add_parameter("5", {migraphx::shape::float_type, {1}});
auto p6 = mm->add_parameter("6", {migraphx::shape::float_type, {1}});
auto p3 = mm->add_parameter("3", {migraphx::shape::float_type, {1}});
auto p4 = mm->add_parameter("4", {migraphx::shape::float_type, {1}});
auto p5 = mm->add_parameter("5", {migraphx::shape::float_type, {1}});
auto p6 = mm->add_parameter("6", {migraphx::shape::float_type, {1}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
uint64_t axis = 1;
auto l3 =
mm->add_instruction(migraphx::make_op("convolution", {{"padding", {0, 0, 0, 0}}}), l0, l1);
auto l4 = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", axis}, {"out_lens", l3->get_shape().lens()}}), l2);
auto l5 = mm->add_instruction(migraphx::make_op("add"), l3, l4);
auto l6 = mm->add_instruction(
migraphx::make_op("batch_norm_inference", {{"epsilon", 1.0e-5f}}), l5, p3, p4, p5, p6);
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p3);
auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p4);
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p5);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p6);
auto mb_mean = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_mean);
auto numer = mm->add_instruction(migraphx::make_op("sub"), l5, mb_mean);
auto mb_eps =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1}}}), eps);
auto var_eps = mm->add_instruction(migraphx::make_op("add"), usq_var, mb_eps);
auto mb_rt =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1}}}), rt);
auto denom = mm->add_instruction(migraphx::make_op("pow"), var_eps, mb_rt);
auto mb_denom = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), denom);
auto div0 = mm->add_instruction(migraphx::make_op("div"), numer, mb_denom);
auto mb_scale = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_scale);
auto r0 = mm->add_instruction(migraphx::make_op("mul"), div0, mb_scale);
auto mb_bias = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_bias);
auto l6 = mm->add_instruction(migraphx::make_op("add"), r0, mb_bias);
auto l7 = mm->add_instruction(migraphx::make_op("relu"), l6);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
......@@ -3378,13 +3503,127 @@ TEST_CASE(nms_test)
auto st = mm->add_parameter("score_threshold", sst);
auto ret = mm->add_instruction(
migraphx::make_op("nonmaxsuppression", {{"center_point_box", 1}}), b, s, mo, iou, st);
migraphx::make_op("nonmaxsuppression", {{"center_point_box", true}}), b, s, mo, iou, st);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("nms_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(nms_dynamic_batch_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {{1, 10, 0}, {6, 6, 0}, {4, 4, 0}}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {{1, 10, 0}, {1, 1, 0}, {6, 6, 0}}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
migraphx::shape siou{migraphx::shape::float_type, {1}};
auto iou = mm->add_parameter("iou_threshold", siou);
migraphx::shape sst{migraphx::shape::float_type, {1}};
auto st = mm->add_parameter("score_threshold", sst);
auto ret = mm->add_instruction(
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
b,
s,
mo,
iou,
st);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_dynamic_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(nms_dynamic_boxes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {{1, 1, 0}, {6, 20, 0}, {4, 4, 0}}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {{1, 1, 0}, {1, 1, 0}, {6, 20, 0}}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
migraphx::shape siou{migraphx::shape::float_type, {1}};
auto iou = mm->add_parameter("iou_threshold", siou);
migraphx::shape sst{migraphx::shape::float_type, {1}};
auto st = mm->add_parameter("score_threshold", sst);
auto ret = mm->add_instruction(
migraphx::make_op("nonmaxsuppression", {{"use_dyn_output", true}}), b, s, mo, iou, st);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {6, 20, 0};
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_dynamic_boxes_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(nms_dynamic_classes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {1, 6, 4}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {{1, 1, 0}, {1, 10, 0}, {6, 6, 0}}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
migraphx::shape siou{migraphx::shape::float_type, {1}};
auto iou = mm->add_parameter("iou_threshold", siou);
migraphx::shape sst{migraphx::shape::float_type, {1}};
auto st = mm->add_parameter("score_threshold", sst);
auto ret = mm->add_instruction(
migraphx::make_op("nonmaxsuppression", {{"use_dyn_output", true}}), b, s, mo, iou, st);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_dynamic_classes_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(nms_overwrite_use_dyn_output_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {1, 6, 4}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {1, 1, 6}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
migraphx::shape siou{migraphx::shape::float_type, {1}};
auto iou = mm->add_parameter("iou_threshold", siou);
migraphx::shape sst{migraphx::shape::float_type, {1}};
auto st = mm->add_parameter("score_threshold", sst);
auto ret = mm->add_instruction(
migraphx::make_op("nonmaxsuppression", {{"use_dyn_output", true}}), b, s, mo, iou, st);
mm->add_return({ret});
migraphx::onnx_options options;
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_use_dyn_output_false_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(nonzero_dynamic_test)
{
migraphx::program p;
......
......@@ -30,6 +30,7 @@
#include <migraphx/pass_manager.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/half.hpp>
#include "test.hpp"
TEST_CASE(averagepool_notset_test)
......@@ -68,6 +69,196 @@ TEST_CASE(averagepool_nt_cip_test)
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_flat_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_flat_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape x_shape{migraphx::shape::float_type, {10}};
migraphx::shape c_shape(migraphx::shape::float_type, {1});
std::vector<float> x_data = {1.6524342,
-0.51048076,
0.32543048,
2.4410043,
2.0833702,
0.44981122,
1.0044622,
-0.24006313,
-0.43065986,
0.07626268};
std::vector<float> scale_data = {-0.02927135};
std::vector<float> bias_data = {0.42347777};
std::vector<float> mean_data = {-0.00449735};
std::vector<float> variance_data = {0.5184545};
migraphx::parameter_map params;
params["x"] = migraphx::argument(x_shape, x_data.data());
params["scale"] = migraphx::argument(c_shape, scale_data.data());
params["bias"] = migraphx::argument(c_shape, bias_data.data());
params["mean"] = migraphx::argument(c_shape, mean_data.data());
params["variance"] = migraphx::argument(c_shape, variance_data.data());
auto result = p.eval(params).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.35612,
0.44404706,
0.4100655,
0.32406294,
0.33860153,
0.40500915,
0.38246143,
0.43305403,
0.4408022,
0.42019472};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_1d_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_1d_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape x_shape{migraphx::shape::half_type, {2, 3, 4}};
migraphx::shape c_shape(migraphx::shape::float_type, {3});
std::vector<float> tmp = {1.652, -0.5103, 0.3254, 2.441, 2.084, 0.4497,
1.005, -0.2401, -0.4307, 0.07623, -0.02927, 0.4236,
-0.004498, -0.4282, -0.5527, 0.02205, -1.472, -1.7295,
0.796, 0.9507, 0.2312, 0.664, -0.06964, 1.035};
std::vector<migraphx::half> x_data{tmp.cbegin(), tmp.cend()};
std::vector<float> scale_data = {-1.336926, -1.0679098, 0.10368501};
std::vector<float> bias_data = {0.20240043, -0.70175606, -0.8859727};
std::vector<float> mean_data = {0.30854642, -0.36574763, -0.9463552};
std::vector<float> variance_data = {0.43428132, 0.97773486, 0.30332062};
migraphx::parameter_map params;
params["x"] = migraphx::argument(x_shape, x_data.data());
params["scale"] = migraphx::argument(c_shape, scale_data.data());
params["bias"] = migraphx::argument(c_shape, bias_data.data());
params["mean"] = migraphx::argument(c_shape, mean_data.data());
params["variance"] = migraphx::argument(c_shape, variance_data.data());
auto result = p.eval(params).back();
std::vector<migraphx::half> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
tmp = {-2.523, 1.863, 0.1681, -4.125, -3.348, -1.582, -2.182, -0.8374,
-0.789, -0.6934, -0.7134, -0.628, 0.8374, 1.697, 1.949, 0.7837,
0.4927, 0.771, -1.956, -2.123, -0.664, -0.583, -0.7207, -0.5127};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_2d_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_2d_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape x_shape{migraphx::shape::float_type, {2, 3, 4, 4}};
migraphx::shape c_shape(migraphx::shape::float_type, {3});
std::vector<float> x_data = {
1.6524342, -0.51048076, 0.32543048, 2.4410043, 2.0833702, 0.44981122, 1.0044622,
-0.24006313, -0.43065986, 0.07626268, -0.02927135, 0.42347777, -0.00449735, -0.4281568,
-0.5527635, 0.02204161, -1.4719028, -1.7298799, 0.79596406, 0.9505461, 0.23115851,
0.6639593, -0.06963254, 1.0348768, -1.336926, -1.0679098, 0.10368501, 0.20240043,
-0.70175606, -0.8859727, 0.30854642, -0.36574763, -0.9463552, 0.9476916, 0.37686515,
-0.05184272, -0.7151244, -0.37341377, 0.59440356, 0.10051094, -0.20755945, 0.9098465,
1.1664004, 1.4075205, -1.1522529, -0.34607422, 0.32027543, -0.6885485, 0.5404544,
0.10012514, 0.8767704, 1.0032021, -1.2755303, 0.23577735, 0.74239916, 1.0146079,
0.60875916, -0.29163074, 1.4872868, 0.20466477, -0.26367408, -0.56394804, -0.56043875,
0.7763664, -0.9626441, 0.29653943, -3.2231965, 0.03322164, 0.03402911, 0.77308357,
-0.0654009, -0.30463725, 0.22182712, -0.22594836, -0.5807543, -0.22390617, -0.24484141,
-2.0761833, 1.8459716, 0.2455878, 0.99913245, -0.9266217, -0.1938893, 0.6417983,
-1.0880078, 0.49565446, 2.1584804, 1.2276239, 3.3091128, 0.14217089, 0.9425477,
0.07578196, 0.4067431, 0.71984154, -0.20796849, 0.90003085};
std::vector<float> scale_data = {0.658487, 0.03700604, 2.463201};
std::vector<float> bias_data = {0.03497279, 0.17080553, 0.5636415};
std::vector<float> mean_data = {0.1954783, 0.6203974, 0.8116831};
std::vector<float> variance_data = {0.30558077, 0.04536599, 0.05461315};
migraphx::parameter_map params;
params["x"] = migraphx::argument(x_shape, x_data.data());
params["scale"] = migraphx::argument(c_shape, scale_data.data());
params["bias"] = migraphx::argument(c_shape, bias_data.data());
params["mean"] = migraphx::argument(c_shape, mean_data.data());
params["variance"] = migraphx::argument(c_shape, variance_data.data());
auto result = p.eval(params).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {
1.77046824e+00, -8.05950999e-01, 1.89769119e-01, 2.70979643e+00, 2.28379035e+00,
3.37928861e-01, 9.98617530e-01, -4.83835101e-01, -7.10869908e-01, -1.07034385e-01,
-2.32744321e-01, 3.06560963e-01, -2.03234047e-01, -7.07888365e-01, -8.56317282e-01,
-1.71621382e-01, -1.92677066e-01, -2.37493858e-01, 2.01305658e-01, 2.28160262e-01,
1.03185430e-01, 1.78373277e-01, 5.09308279e-02, 2.42810518e-01, -1.69228360e-01,
-1.22493818e-01, 8.10402334e-02, 9.81894583e-02, -5.88841513e-02, -9.08869803e-02,
1.16629556e-01, -5.11445105e-04, -1.79648399e+01, 1.99707508e+00, -4.01903248e+00,
-8.53731060e+00, -1.55278311e+01, -1.19264421e+01, -1.72633123e+00, -6.93161058e+00,
-1.01784554e+01, 1.59821415e+00, 4.30211163e+00, 6.84334660e+00, -2.01348572e+01,
-1.16383028e+01, -4.61544800e+00, -1.52477398e+01, 4.45901126e-01, -7.86099210e-02,
8.46513629e-01, 9.97116446e-01, -1.71726203e+00, 8.29761624e-02, 6.86453462e-01,
1.01070285e+00, 5.27264357e-01, -5.45261383e-01, 1.57374811e+00, 4.59154993e-02,
-5.11959970e-01, -8.69639993e-01, -8.65459919e-01, 7.26914644e-01, -1.04206637e-01,
1.14543661e-01, -4.96918678e-01, 6.87990561e-02, 6.89393356e-02, 1.97330773e-01,
5.16659655e-02, 1.01048872e-02, 1.01564340e-01, 2.37750299e-02, -3.78632471e-02,
2.41298079e-02, 2.04928555e-02, -2.97655046e-01, 3.83717060e-01, 1.05692141e-01,
2.53922558e+00, -1.77568626e+01, -1.00343809e+01, -1.22682428e+00, -1.94577579e+01,
-2.76707697e+00, 1.47579327e+01, 4.94736385e+00, 2.68847847e+01, -6.49254417e+00,
1.94286156e+00, -7.19223642e+00, -3.70413971e+00, -4.04303551e-01, -1.01827660e+01,
1.49476433e+00};
EXPECT(migraphx::verify_range(result_vector, gold));
}
// Verifies the parsed ONNX BatchNormalization op on a rank-5 (3-D spatial)
// half-precision input against precomputed expected values, using the ref target.
TEST_CASE(batch_norm_3d_test)
{
    migraphx::program p = migraphx::parse_onnx("batch_norm_3d_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape x_shape{migraphx::shape::half_type, {2, 2, 2, 2, 2}};
    migraphx::shape c_shape{migraphx::shape::half_type, {2}};

    // migraphx::half has no initializer_list constructor, so build each half
    // vector by element-wise copy conversion from a float vector.
    auto to_half = [](const std::vector<float>& values) {
        return std::vector<migraphx::half>(values.cbegin(), values.cend());
    };

    auto x_data = to_half({5., 5., 8., 7., 3., 4., 1., 7., 5., 5., 9., 4., 7., 2., 2., 2.,
                           6., 1., 4., 9., 2., 8., 0., 2., 1., 4., 8., 8., 3., 3., 0., 8.});
    auto scale_data    = to_half({1., 1.});
    auto bias_data     = to_half({0., 0.});
    auto mean_data     = to_half({-0.75, 0.29});
    auto variance_data = to_half({0.31, 0.37});

    migraphx::parameter_map params;
    params["x"]        = migraphx::argument(x_shape, x_data.data());
    params["scale"]    = migraphx::argument(c_shape, scale_data.data());
    params["bias"]     = migraphx::argument(c_shape, bias_data.data());
    params["mean"]     = migraphx::argument(c_shape, mean_data.data());
    params["variance"] = migraphx::argument(c_shape, variance_data.data());

    auto result = p.eval(params).back();
    std::vector<migraphx::half> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    auto gold = to_half({10.33, 10.33, 15.71, 13.914, 6.734, 8.53, 3.143, 13.914, 7.742,
                         7.742, 14.32, 6.098,  11.03, 2.81,  2.81, 2.81,  12.125, 3.143,
                         8.53,  17.52, 4.938,  15.71, 1.347, 4.938, 1.167, 6.098, 12.67,
                         12.67, 4.453, 4.453,  -0.4768, 12.67});
    EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(celu_verify_test)
{
migraphx::program p = migraphx::parse_onnx("celu_verify_test.onnx");
......
......@@ -1135,6 +1135,149 @@ TEST_CASE(multinomial)
throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), s, s);
}
// Output-shape computation tests for the nonmaxsuppression operator, covering
// fixed and dynamic input shapes plus the batch/num-boxes mismatch error cases.
TEST_CASE(nms_shape)
{
    migraphx::shape boxes_s{migraphx::shape::float_type, {1, 6, 4}};
    migraphx::shape scores_s{migraphx::shape::float_type, {1, 1, 6}};
    migraphx::shape output_s{migraphx::shape::int64_type, {6, 3}};
    migraphx::shape max_out_s{migraphx::shape::int64_type, {1}};
    migraphx::shape iou_thres_s{migraphx::shape::float_type, {1}};
    migraphx::shape score_thres_s{migraphx::shape::float_type, {1}};

    // Op under test, parameterized only by the use_dyn_output attribute.
    auto nms_op = [](bool use_dyn_output) {
        return migraphx::make_op(
            "nonmaxsuppression",
            {{"center_point_box", true}, {"use_dyn_output", use_dyn_output}});
    };
    // Expect output_s to be computed for the current boxes_s/scores_s.
    auto expect_nms = [&](bool use_dyn_output) {
        expect_shape(
            output_s, nms_op(use_dyn_output), boxes_s, scores_s, max_out_s, iou_thres_s, score_thres_s);
    };
    // Expect shape computation to fail for the current boxes_s/scores_s.
    auto throws_nms = [&](bool use_dyn_output) {
        throws_shape(nms_op(use_dyn_output), boxes_s, scores_s, max_out_s, iou_thres_s, score_thres_s);
    };

    // use_dyn_output == false
    expect_nms(false);

    // use_dyn_output == true
    output_s = {migraphx::shape::int64_type, {{0, 6, 0}, {3, 3, 0}}};
    expect_nms(true);

    // dynamic batches
    boxes_s  = {migraphx::shape::float_type, {{1, 3, 0}, {6, 6, 0}, {4, 4, 0}}};
    scores_s = {migraphx::shape::float_type, {{1, 3, 0}, {1, 1, 0}, {6, 6, 0}}};
    output_s = {migraphx::shape::int64_type, {{0, 18, 0}, {3, 3, 0}}};
    expect_nms(true);

    // dynamic num boxes
    boxes_s  = {migraphx::shape::float_type, {{1, 1, 0}, {6, 20, 0}, {4, 4, 0}}};
    scores_s = {migraphx::shape::float_type, {{1, 1, 0}, {1, 1, 0}, {6, 20, 0}}};
    output_s = {migraphx::shape::int64_type, {{0, 20, 0}, {3, 3, 0}}};
    expect_nms(true);

    // use_dyn_output false with dynamic input shape
    throws_nms(false);

    // dynamic classes
    boxes_s  = {migraphx::shape::float_type, {{1, 1, 0}, {6, 6, 0}, {4, 4, 0}}};
    scores_s = {migraphx::shape::float_type, {{1, 1, 0}, {1, 3, 0}, {6, 6, 0}}};
    output_s = {migraphx::shape::int64_type, {{0, 6, 0}, {3, 3, 0}}};
    expect_nms(true);

    // fixed mismatch batches
    boxes_s  = {migraphx::shape::float_type, {2, 6, 4}};
    scores_s = {migraphx::shape::float_type, {1, 1, 6}};
    throws_nms(true);

    // fixed mismatch num boxes
    boxes_s  = {migraphx::shape::float_type, {1, 6, 4}};
    scores_s = {migraphx::shape::float_type, {1, 1, 4}};
    throws_nms(true);

    // dynamic mismatch batches
    boxes_s  = {migraphx::shape::float_type, {{1, 4, 0}, {6, 6, 0}, {4, 4, 0}}};
    scores_s = {migraphx::shape::float_type, {{2, 8, 0}, {1, 1, 0}, {6, 6, 0}}};
    throws_nms(true);

    // dynamic mismatch num boxes
    boxes_s  = {migraphx::shape::float_type, {{1, 1, 0}, {6, 8, 0}, {4, 4, 0}}};
    scores_s = {migraphx::shape::float_type, {{1, 1, 0}, {1, 1, 0}, {3, 9, 0}}};
    throws_nms(true);

    // dynamic number of classes, fixed boxes_s, mismatch batches
    boxes_s  = {migraphx::shape::float_type, {1, 6, 4}};
    scores_s = {migraphx::shape::float_type, {{1, 3, 0}, {1, 3, 0}, {6, 6, 0}}};
    throws_nms(true);

    // dynamic number of classes, fixed boxes_s, mismatch num boxes
    boxes_s  = {migraphx::shape::float_type, {1, 6, 4}};
    scores_s = {migraphx::shape::float_type, {{1, 1, 0}, {1, 3, 0}, {4, 8, 0}}};
    throws_nms(true);
}
TEST_CASE(pooling_shape)
{
migraphx::shape output{migraphx::shape::float_type, {4, 3, 1, 1}};
......
......@@ -138,6 +138,7 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_eyelike.*')
backend_test.include(r'.*test_flatten.*')
backend_test.include(r'.*test_floor.*')
backend_test.include(r'.*test_fmod.*')
backend_test.include(r'.*test_gather.*')
backend_test.include(r'.*test_gemm.*')
backend_test.include(r'.*test_globalaveragepool.*')
......@@ -162,6 +163,7 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_MaxPool[1-9]d.*')
backend_test.include(r'.*test_mean.*')
backend_test.include(r'.*test_min.*')
# Include the ONNX mod operator tests. The previous pattern r' .*test_mod.*'
# began with a literal space, which can never match a test id, so these tests
# were silently skipped (compare the other include patterns, e.g. test_fmod).
backend_test.include(r'.*test_mod.*')
backend_test.include(r'.*test_mul.*')
backend_test.include(r'.*test_multinomial.*')
backend_test.include(r'.*test_Multinomial.*')
......@@ -179,6 +181,7 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_operator_max_.*')
backend_test.include(r'.*test_operator_maxpool.*')
backend_test.include(r'.*test_operator_min.*')
backend_test.include(r'.*test_operator_mod.*')
backend_test.include(r'.*test_operator_mm.*')
backend_test.include(r'.*test_operator_non_float_params.*')
backend_test.include(r'.*test_operator_params.*')
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment