Commit 712f6134 authored by Shucai Xiao's avatar Shucai Xiao
Browse files

merge changes from develop branch and resolve merge conflicts

parents 4a39a0f7 b20e3d4d
#ifndef MIGRAPHX_GUARD_TEST_INCLUDE_POINTWISE_HPP
#define MIGRAPHX_GUARD_TEST_INCLUDE_POINTWISE_HPP
#include <migraphx/program.hpp>
#include <migraphx/module.hpp>
#include <migraphx/make_op.hpp>
// Builds a "pointwise" instruction in the main module of `p` backed by a new
// submodule `name`. The callback `f` receives the submodule and its scalar
// parameters (x0, x1, ...) and returns the instruction used as the submodule's
// return value.
template <class F>
migraphx::instruction_ref add_pointwise(migraphx::program& p,
                                        const std::string& name,
                                        std::vector<migraphx::instruction_ref> inputs,
                                        F f)
{
    auto* sub_mod  = p.create_module(name);
    auto* main_mod = p.get_main_module();
    sub_mod->set_bypass();
    std::vector<migraphx::instruction_ref> params;
    params.reserve(inputs.size());
    std::size_t idx = 0;
    for(const auto& input : inputs)
    {
        // Parameters are scalar shapes carrying only the element type.
        params.push_back(sub_mod->add_parameter("x" + std::to_string(idx++),
                                                migraphx::shape{input->get_shape().type()}));
    }
    auto result = f(sub_mod, params);
    sub_mod->add_return({result});
    return main_mod->add_instruction(migraphx::make_op("pointwise"), inputs, {sub_mod});
}
// Returns a callback (usable with add_pointwise) that inserts a single
// operator of the given name into the pointwise submodule.
inline auto single_pointwise(const std::string& name)
{
    return [name](auto* pm, const auto& inputs) {
        return pm->add_instruction(migraphx::make_op(name), inputs);
    };
}
#endif // MIGRAPHX_GUARD_TEST_INCLUDE_POINTWISE_HPP
#include <migraphx/program.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/marker.hpp>
#include <migraphx/instruction.hpp>
#include "test.hpp"
// Test double for the marker interface: records every callback into a shared
// string stream so the test can assert which hooks fired. The stream is held
// by shared_ptr so copies of the marker write to the same buffer.
struct mock_marker
{
    std::shared_ptr<std::stringstream> ss = std::make_shared<std::stringstream>();

    void mark_start(migraphx::instruction_ref ins_ref)
    {
        (*ss) << "Mock marker instruction start:" << ins_ref->name();
    }

    void mark_stop(migraphx::instruction_ref) { (*ss) << "Mock marker instruction stop."; }

    void mark_start(const migraphx::program&) { (*ss) << "Mock marker program start."; }

    void mark_stop(const migraphx::program&) { (*ss) << "Mock marker program stop."; }
};
// Verifies that program::mark invokes the marker callbacks for the program
// and for each instruction of a small compiled program.
TEST_CASE(marker)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto one = mm->add_literal(1);
    auto two = mm->add_literal(2);
    mm->add_instruction(migraphx::make_op("add"), one, two);
    p.compile(migraphx::ref::target{});

    mock_marker temp_marker;
    p.mark({}, temp_marker);
    std::string output = temp_marker.ss->str();

    // Both literals and the compiled op ("ref::op") must have been marked,
    // along with the program-level start/stop hooks.
    EXPECT(migraphx::contains(output, "Mock marker instruction start:@literal"));
    EXPECT(migraphx::contains(output, "Mock marker instruction start:ref::op"));
    EXPECT(migraphx::contains(output, "Mock marker instruction stop."));
    EXPECT(migraphx::contains(output, "Mock marker program start."));
    EXPECT(migraphx::contains(output, "Mock marker program stop."));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
...@@ -11,7 +11,7 @@ std::vector<char> msgpack_buffer(const T& src) ...@@ -11,7 +11,7 @@ std::vector<char> msgpack_buffer(const T& src)
msgpack::pack(buffer, src); msgpack::pack(buffer, src);
buffer.seekg(0); buffer.seekg(0);
std::string str = buffer.str(); std::string str = buffer.str();
return std::vector<char>(str.data(), str.data() + str.size()); return std::vector<char>(str.data(), str.data() + str.size()); // NOLINT
} }
TEST_CASE(test_msgpack_empty_value) TEST_CASE(test_msgpack_empty_value)
......
...@@ -1061,6 +1061,62 @@ def depthtospace_crd_test(): ...@@ -1061,6 +1061,62 @@ def depthtospace_crd_test():
return ([node], [x], [y]) return ([node], [x], [y])
@onnx_test
def spacetodepth_test():
    # SpaceToDepth with blocksize=2: (2, 2, 10, 10) -> (2, 8, 5, 5).
    # Fixed: `TensorProto.float` does not exist (AttributeError at generation
    # time) -- the enum member is FLOAT; and the ONNX op type is the
    # case-sensitive 'SpaceToDepth' (matching spacetodepth_simple_test),
    # not 'spacetodepth'.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 10, 10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 8, 5, 5])
    node = onnx.helper.make_node('SpaceToDepth',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2)
    return ([node], [x], [y])
@onnx_test
def spacetodepth_simple_test():
    """SpaceToDepth with blocksize=2: (1, 2, 4, 6) -> (1, 8, 2, 3)."""
    inp = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2, 4, 6])
    out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 8, 2, 3])
    op = onnx.helper.make_node('SpaceToDepth',
                               inputs=['x'],
                               outputs=['y'],
                               blocksize=2)
    return ([op], [inp], [out])
@onnx_test
def spacetodepth_invalid_blocksize_test():
    # Negative test: blocksize is intentionally non-integer (0.3); the
    # matching parser test expects this model to be rejected.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2, 4, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 8, 2, 3])
    node = onnx.helper.make_node('SpaceToDepth',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=0.3)
    return ([node], [x], [y])
@onnx_test
def spacetodepth_nondivisibility_test():
    # Negative test: the spatial dims (5 x 5) are not divisible by
    # blocksize=2, so parsing is expected to fail.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 8, 2, 2])
    node = onnx.helper.make_node('SpaceToDepth',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2)
    return ([node], [x], [y])
@onnx_test @onnx_test
def dequantizelinear_test(): def dequantizelinear_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.INT8, [5]) arg0 = helper.make_tensor_value_info('0', TensorProto.INT8, [5])
...@@ -1562,6 +1618,22 @@ def greater_bool_test(): ...@@ -1562,6 +1618,22 @@ def greater_bool_test():
return ([node1, node2], [x1, x2], [y]) return ([node1, node2], [x1, x2], [y])
@onnx_test
def greaterorequal_test():
    """GreaterOrEqual on two 1-D float tensors of length 3."""
    lhs = helper.make_tensor_value_info('x1', TensorProto.FLOAT, [3])
    rhs = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [3])
    res = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
    cmp_node = onnx.helper.make_node('GreaterOrEqual',
                                     inputs=['x1', 'x2'],
                                     outputs=['y'])
    return ([cmp_node], [lhs, rhs], [res])
@onnx_test @onnx_test
def group_conv_test(): def group_conv_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 4, 16, 16]) x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 4, 16, 16])
...@@ -1578,6 +1650,60 @@ def group_conv_test(): ...@@ -1578,6 +1650,60 @@ def group_conv_test():
return ([node], [x, y], [z]) return ([node], [x, y], [z])
@onnx_test
def hardsigmoid_default_test():
    """HardSigmoid with default attributes on a float NCHW tensor."""
    dims = [1, 3, 4, 5]
    a = helper.make_tensor_value_info('x', TensorProto.FLOAT, dims)
    b = helper.make_tensor_value_info('y', TensorProto.FLOAT, dims)
    return ([onnx.helper.make_node('HardSigmoid', inputs=['x'], outputs=['y'])],
            [a], [b])
@onnx_test
def hardsigmoid_double_test():
    """HardSigmoid with explicit alpha=0.3, beta=0.7 on a double tensor."""
    dims = [1, 3, 4, 5]
    a = helper.make_tensor_value_info('x', TensorProto.DOUBLE, dims)
    b = helper.make_tensor_value_info('y', TensorProto.DOUBLE, dims)
    op = onnx.helper.make_node('HardSigmoid',
                               inputs=['x'],
                               outputs=['y'],
                               alpha=0.3,
                               beta=0.7)
    return ([op], [a], [b])
@onnx_test
def hardsigmoid_half_test():
    """HardSigmoid with default attributes on an fp16 tensor."""
    dims = [1, 3, 4, 5]
    a = helper.make_tensor_value_info('x', TensorProto.FLOAT16, dims)
    b = helper.make_tensor_value_info('y', TensorProto.FLOAT16, dims)
    return ([onnx.helper.make_node('HardSigmoid', inputs=['x'], outputs=['y'])],
            [a], [b])
@onnx_test
def hardsigmoid_verify_test():
    """HardSigmoid with default attributes on a small 2-D tensor."""
    a = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5])
    b = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 5])
    return ([onnx.helper.make_node('HardSigmoid', inputs=['x'], outputs=['y'])],
            [a], [b])
@onnx_test
def hardswish_test():
    """HardSwish elementwise op on a small 2-D float tensor."""
    a = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5])
    b = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 5])
    return ([onnx.helper.make_node('HardSwish', inputs=['x'], outputs=['y'])],
            [a], [b])
@onnx_test @onnx_test
def if_else_test(): def if_else_test():
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3]) x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
...@@ -2636,6 +2762,80 @@ def maxpool_same_upper_test(): ...@@ -2636,6 +2762,80 @@ def maxpool_same_upper_test():
return ([node], [x], [y]) return ([node], [x], [y])
@onnx_test
def mean_broadcast_test():
    """Mean of five float inputs whose shapes broadcast to [1, 2, 3, 4]."""
    in_shapes = [[1, 3, 4], [1, 2, 3, 4], [4], [1], [2, 3, 1]]
    in_infos = [
        helper.make_tensor_value_info(str(i), TensorProto.FLOAT, dims)
        for i, dims in enumerate(in_shapes)
    ]
    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT,
                                         [1, 2, 3, 4])
    node = onnx.helper.make_node("Mean",
                                 inputs=[str(i) for i in range(5)],
                                 outputs=["mean"])
    return ([node], in_infos, [mean])
@onnx_test
def mean_fp16_test():
    """Mean of three fp16 inputs with identical shape [1, 2, 3]."""
    names = ["0", "1", "2"]
    data = [
        helper.make_tensor_value_info(n, TensorProto.FLOAT16, [1, 2, 3])
        for n in names
    ]
    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT16,
                                         [1, 2, 3])
    node = onnx.helper.make_node("Mean", inputs=names, outputs=["mean"])
    return ([node], data, [mean])
@onnx_test
def mean_invalid_broadcast_test():
    # Negative test: the third input's trailing dim (4) does not match the
    # others' (3) and is not 1, so the shapes cannot broadcast; the matching
    # parser test expects this model to be rejected.
    data_0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 3])
    data_1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 2, 3])
    data_2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 2, 4])
    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1, 2, 3])
    node = onnx.helper.make_node("Mean",
                                 inputs=["0", "1", "2"],
                                 outputs=["mean"])
    return ([node], [data_0, data_1, data_2], [mean])
@onnx_test
def mean_single_input_test():
    """Mean with a single input (reduces to the identity)."""
    data = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 3])
    out = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1, 2, 3])
    node = onnx.helper.make_node("Mean", inputs=["0"], outputs=["mean"])
    return ([node], [data], [out])
@onnx_test
def mean_test():
    """Mean over ten double inputs of identical shape [2, 2, 2]."""
    names = [str(i) for i in range(10)]
    data = [
        helper.make_tensor_value_info(n, TensorProto.DOUBLE, [2, 2, 2])
        for n in names
    ]
    mean = helper.make_tensor_value_info('mean', TensorProto.DOUBLE, [2, 2, 2])
    node = onnx.helper.make_node("Mean", inputs=names, outputs=["mean"])
    return ([node], data, [mean])
@onnx_test @onnx_test
def min_test(): def min_test():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3]) a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
...@@ -2669,6 +2869,21 @@ def multinomial_test(): ...@@ -2669,6 +2869,21 @@ def multinomial_test():
return ([node], [input], [output]) return ([node], [input], [output])
@onnx_test
def multinomial_generated_seed_test():
    """Multinomial without an explicit seed attribute (importer generates one)."""
    inp = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 10])
    out = helper.make_tensor_value_info("output", TensorProto.INT32, [1, 10])
    op = onnx.helper.make_node('Multinomial',
                               inputs=['input'],
                               sample_size=10,
                               outputs=['output'])
    return ([op], [inp], [out])
@onnx_test @onnx_test
def multinomial_dtype_error_test(): def multinomial_dtype_error_test():
sample_size = 10 sample_size = 10
...@@ -2715,6 +2930,31 @@ def neg_test(): ...@@ -2715,6 +2930,31 @@ def neg_test():
return ([node], [x], [y]) return ([node], [x], [y])
@onnx_test
def nms_test():
    """NonMaxSuppression with all five inputs and center_point_box=1."""
    boxes = helper.make_tensor_value_info('boxes', TensorProto.FLOAT,
                                          [1, 6, 4])
    scores = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
                                           [1, 1, 6])
    max_out = helper.make_tensor_value_info('max_output_boxes_per_class',
                                            TensorProto.INT64, [1])
    iou_thres = helper.make_tensor_value_info('iou_threshold',
                                              TensorProto.FLOAT, [1])
    score_thres = helper.make_tensor_value_info('score_threshold',
                                                TensorProto.FLOAT, [1])
    selected = helper.make_tensor_value_info('selected_indices',
                                             TensorProto.INT64, [6, 3])
    op = onnx.helper.make_node(
        'NonMaxSuppression',
        inputs=[
            'boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold',
            'score_threshold'
        ],
        outputs=['selected_indices'],
        center_point_box=1)
    return ([op], [boxes, scores, max_out, iou_thres, score_thres], [selected])
@onnx_test @onnx_test
def not_test(): def not_test():
x = helper.make_tensor_value_info('0', TensorProto.INT32, [4]) x = helper.make_tensor_value_info('0', TensorProto.INT32, [4])
...@@ -3095,6 +3335,21 @@ def randomnormal_dtype_error_test(): ...@@ -3095,6 +3335,21 @@ def randomnormal_dtype_error_test():
return ([node], [], [output]) return ([node], [], [output])
@onnx_test
def randomnormal_generated_seed_test():
    # Model with no explicit `seed` attribute; the matching C++ test checks
    # that two parses of this file yield different programs.
    # NOTE(review): `inputs=['input']` and `sample_size` look copy-pasted from
    # the Multinomial generator -- RandomNormal conventionally takes no inputs
    # and has no sample_size attribute. Confirm this is intentional.
    sample_size = 10
    input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 10])
    output = helper.make_tensor_value_info("output", TensorProto.INT32,
                                           [1, 10])
    node = onnx.helper.make_node('RandomNormal',
                                 inputs=['input'],
                                 sample_size=sample_size,
                                 outputs=['output'])
    return ([node], [input], [output])
@onnx_test @onnx_test
def randomnormal_shape_error_test(): def randomnormal_shape_error_test():
dtype = 1 dtype = 1
...@@ -3185,6 +3440,21 @@ def randomuniform_dtype_error_test(): ...@@ -3185,6 +3440,21 @@ def randomuniform_dtype_error_test():
return ([node], [], [output]) return ([node], [], [output])
@onnx_test
def randomuniform_generated_seed_test():
    # Model with no explicit `seed` attribute; the matching C++ test checks
    # that two parses of this file yield different programs.
    # NOTE(review): `inputs=['input']` and `sample_size` look copy-pasted from
    # the Multinomial generator -- RandomUniform conventionally takes no
    # inputs and has no sample_size attribute. Confirm this is intentional.
    sample_size = 10
    input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 10])
    output = helper.make_tensor_value_info("output", TensorProto.INT32,
                                           [1, 10])
    node = onnx.helper.make_node('RandomUniform',
                                 inputs=['input'],
                                 sample_size=sample_size,
                                 outputs=['output'])
    return ([node], [input], [output])
@onnx_test @onnx_test
def randomuniform_shape_error_test(): def randomuniform_shape_error_test():
dtype = 1 dtype = 1
...@@ -3779,6 +4049,7 @@ def resize_upsample_pf_test(): ...@@ -3779,6 +4049,7 @@ def resize_upsample_pf_test():
return ([node], [X], [Y], [scale_tensor]) return ([node], [X], [Y], [scale_tensor])
@onnx_test
def resize_upsample_pc_test(): def resize_upsample_pc_test():
scales = np.array([1.0, 1.0, 2.0, 1.5], dtype=np.float32) scales = np.array([1.0, 1.0, 2.0, 1.5], dtype=np.float32)
scale_tensor = helper.make_tensor(name='scales', scale_tensor = helper.make_tensor(name='scales',
...@@ -3801,6 +4072,41 @@ def resize_upsample_pc_test(): ...@@ -3801,6 +4072,41 @@ def resize_upsample_pc_test():
return ([node], [X], [Y], [scale_tensor]) return ([node], [X], [Y], [scale_tensor])
@onnx_test
def roialign_default_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 4, 7, 8])
roi = helper.make_tensor_value_info('rois', TensorProto.FLOAT, [8, 4])
bi = helper.make_tensor_value_info('batch_ind', TensorProto.INT64, [8])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8, 4, 1, 1])
node = onnx.helper.make_node('RoiAlign',
inputs=['x', 'rois', 'batch_ind'],
outputs=['y'])
return ([node], [x, roi, bi], [y])
@onnx_test
def roialign_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 5, 4, 7])
roi = helper.make_tensor_value_info('rois', TensorProto.FLOAT, [8, 4])
bi = helper.make_tensor_value_info('batch_ind', TensorProto.INT64, [8])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8, 4, 5, 5])
node = onnx.helper.make_node(
'RoiAlign',
inputs=['x', 'rois', 'batch_ind'],
outputs=['y'],
spatial_scale=2.0,
output_height=5,
output_width=5,
sampling_ratio=3,
mode="avg",
coordinate_transformation_mode="output_half_pixel")
return ([node], [x, roi, bi], [y])
@onnx_test @onnx_test
def scatter_test(): def scatter_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4, 5, 6]) x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4, 5, 6])
...@@ -4173,6 +4479,44 @@ def softmax_nonstd_input_test(): ...@@ -4173,6 +4479,44 @@ def softmax_nonstd_input_test():
return ([node0, node1], [x], [y]) return ([node0, node1], [x], [y])
@onnx_test
def softsign_test():
    """Softsign elementwise op on a 1-D float tensor."""
    a = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
    b = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
    return ([onnx.helper.make_node('Softsign', inputs=['x'], outputs=['y'])],
            [a], [b])
@onnx_test
def softplus_test():
    # Softplus elementwise op on a 1-D float tensor.
    # Fixed: the @onnx_test decorator was missing, so this generator was never
    # registered and no .onnx file was emitted -- every sibling generator in
    # this file is decorated.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
    node = onnx.helper.make_node('Softplus', inputs=['x'], outputs=['y'])
    return ([node], [x], [y])
@onnx_test
def softsign_nd_test():
    """Softsign on a 3-D fp16 tensor."""
    a = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [3, 4, 5])
    b = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [3, 4, 5])
    return ([onnx.helper.make_node('Softsign', inputs=['x'], outputs=['y'])],
            [a], [b])
@onnx_test
def softplus_nd_test():
    # Softplus on a 3-D fp16 tensor.
    # Fixed: the @onnx_test decorator was missing, so this generator was never
    # registered and no .onnx file was emitted -- every sibling generator in
    # this file is decorated.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [3, 4, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [3, 4, 5])
    node = onnx.helper.make_node('Softplus', inputs=['x'], outputs=['y'])
    return ([node], [x], [y])
@onnx_test @onnx_test
def split_minus_axis_test(): def split_minus_axis_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
...@@ -4730,6 +5074,25 @@ def unknown_aten_test(): ...@@ -4730,6 +5074,25 @@ def unknown_aten_test():
return ([node], [x, y], [a]) return ([node], [x, y], [a])
@onnx_test
def upsample_linear_test():
    """Upsample (mode='linear') with 2x spatial scales given as an initializer."""
    scale_vals = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
    scales_tensor = helper.make_tensor(
        name='scales',
        data_type=TensorProto.FLOAT,
        dims=scale_vals.shape,
        vals=scale_vals.flatten().astype(np.float32))
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
    op = onnx.helper.make_node('Upsample',
                               inputs=['X', '', 'scales'],
                               outputs=['Y'],
                               mode='linear')
    return ([op], [X], [Y], [scales_tensor])
@onnx_test @onnx_test
def upsample_test(): def upsample_test():
scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32) scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
......
No preview for this file type
greaterorequal_test:g

x1
x2y"GreaterOrEqualgreaterorequal_testZ
x1

Z
x2

b
y

B
\ No newline at end of file
hardsigmoid_default_test:i

xy" HardSigmoidhardsigmoid_default_testZ
x




b
y




B
\ No newline at end of file
hardsigmoid_double_test:
4
xy" HardSigmoid*
alpha>*
beta333?hardsigmoid_double_testZ
x
 



b
y
 



B
\ No newline at end of file
hardsigmoid_half_test:f

xy" HardSigmoidhardsigmoid_half_testZ
x





b
y





B
\ No newline at end of file
hardsigmoid_verify_test:X

xy" HardSigmoidhardsigmoid_verify_testZ
x


b
y


B
\ No newline at end of file
hardswish_test:M

xy" HardSwishhardswish_testZ
x


b
y


B
\ No newline at end of file
mean_broadcast_test:Ã

0
1
2
3
4mean"Meanmean_broadcast_testZ
0



Z
1




Z
2

Z
3

Z
4



b
mean




B
\ No newline at end of file
mean_fp16_test:Ž

0
1
2mean"Meanmean_fp16_testZ
0




Z
1




Z
2




b
mean




B
\ No newline at end of file
mean_invalid_broadcast_test:›

0
1
2mean"Meanmean_invalid_broadcast_testZ
0



Z
1



Z
2



b
mean



B
\ No newline at end of file
mean_single_input_test:^

0mean"Meanmean_single_input_testZ
0



b
mean



B
\ No newline at end of file
 mean_test:Í
*
0
1
2
3
4
5
6
7
8
9mean"Mean mean_testZ
0
 


Z
1
 


Z
2
 


Z
3
 


Z
4
 


Z
5
 


Z
6
 


Z
7
 


Z
8
 


Z
9
 


b
mean
 


B
\ No newline at end of file
multinomial_generated_seed_test:
0
inputoutput" Multinomial*
sample_size
multinomial_generated_seed_testZ
input



b
output



B
\ No newline at end of file
nms_test:

boxes
scores
max_output_boxes_per_class
iou_threshold
score_thresholdselected_indices"NonMaxSuppression*
center_point_boxnms_testZ
boxes



Z
scores



Z(
max_output_boxes_per_class

Z
iou_threshold

Z
score_threshold

b"
selected_indices


B
\ No newline at end of file
...@@ -965,6 +965,46 @@ TEST_CASE(depthtospace_simple_test) ...@@ -965,6 +965,46 @@ TEST_CASE(depthtospace_simple_test)
EXPECT(p == prog); EXPECT(p == prog);
} }
// SpaceToDepth parses to reshape -> transpose -> contiguous -> reshape,
// moving blocksize x blocksize spatial tiles into the channel dimension.
TEST_CASE(spacetodepth_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {2, 2, 10, 10}});
    // Split each spatial dim into (dim / blocksize, blocksize).
    auto tmp1 =
        mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 5, 2, 5, 2}}}), l0);
    // Move the two blocksize axes ahead of the original channel axis.
    auto tmp2 = mm->add_instruction(
        migraphx::make_op("transpose", {{"permutation", {0, 3, 5, 1, 2, 4}}}), tmp1);
    auto tmp3 = mm->add_instruction(migraphx::make_op("contiguous"), tmp2);
    // Collapse to (N, C * blocksize^2, H / blocksize, W / blocksize).
    mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 8, 5, 5}}}), tmp3);
    auto prog = optimize_onnx("spacetodepth_test.onnx");

    EXPECT(p == prog);
}
// Same SpaceToDepth lowering as above on a smaller (1, 2, 4, 6) input.
TEST_CASE(spacetodepth_simple_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 2, 4, 6}});
    // Split each spatial dim into (dim / blocksize, blocksize).
    auto tmp1 =
        mm->add_instruction(migraphx::make_op("reshape", {{"dims", {1, 2, 2, 2, 3, 2}}}), l0);
    auto tmp2 = mm->add_instruction(
        migraphx::make_op("transpose", {{"permutation", {0, 3, 5, 1, 2, 4}}}), tmp1);
    auto tmp3 = mm->add_instruction(migraphx::make_op("contiguous"), tmp2);
    mm->add_instruction(migraphx::make_op("reshape", {{"dims", {1, 8, 2, 3}}}), tmp3);
    auto prog = optimize_onnx("spacetodepth_simple_test.onnx");

    EXPECT(p == prog);
}
// A non-integer blocksize attribute must be rejected at parse time.
TEST_CASE(spacetodepth_invalid_blocksize)
{
    EXPECT(test::throws([&] { migraphx::parse_onnx("spacetodepth_invalid_blocksize_test.onnx"); }));
}
// Spatial dims not divisible by blocksize must be rejected at parse time.
TEST_CASE(spacetodepth_nondivisibility_test)
{
    EXPECT(test::throws([&] { migraphx::parse_onnx("spacetodepth_nondivisibility_test.onnx"); }));
}
TEST_CASE(dequantizelinear_test) TEST_CASE(dequantizelinear_test)
{ {
migraphx::program p; migraphx::program p;
...@@ -1509,6 +1549,24 @@ TEST_CASE(greater_bool_test) ...@@ -1509,6 +1549,24 @@ TEST_CASE(greater_bool_test)
EXPECT(p == prog); EXPECT(p == prog);
} }
// GreaterOrEqual is lowered as not(less(x1, x2)): x1 >= x2 <=> !(x1 < x2).
TEST_CASE(greaterorequal_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();

    auto input1 = mm->add_parameter("x1", migraphx::shape{migraphx::shape::float_type, {3}});
    auto input2 = mm->add_parameter("x2", migraphx::shape{migraphx::shape::float_type, {3}});

    auto temp = mm->add_instruction(migraphx::make_op("less"), input1, input2);
    // The comparison result is converted to bool before negation.
    auto bt = mm->add_instruction(
        migraphx::make_op("convert", {{"target_type", migraphx::shape::bool_type}}), temp);
    auto ge = mm->add_instruction(migraphx::make_op("not"), bt);
    mm->add_return({ge});

    auto prog = migraphx::parse_onnx("greaterorequal_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(group_conv_test) TEST_CASE(group_conv_test)
{ {
migraphx::program p; migraphx::program p;
...@@ -1523,6 +1581,140 @@ TEST_CASE(group_conv_test) ...@@ -1523,6 +1581,140 @@ TEST_CASE(group_conv_test)
EXPECT(p == prog); EXPECT(p == prog);
} }
// HardSigmoid expands to clip(alpha * x + beta, 0, 1); with no attributes
// the defaults alpha = 0.2, beta = 0.5 are used. All scalars are broadcast
// to the input shape before the elementwise ops.
TEST_CASE(hardsigmoid_default_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<std::size_t> input_lens{1, 3, 4, 5};
    auto input_type = migraphx::shape::float_type;
    migraphx::shape s{input_type, input_lens};
    auto x = mm->add_parameter("x", s);

    float alpha = 0.2;
    float beta  = 0.5;

    auto mb_alpha = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
        mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {alpha}}));
    auto mb_beta = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
        mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {beta}}));
    auto mb_zero =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {0}}));
    auto mb_one =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));

    auto mul = mm->add_instruction(migraphx::make_op("mul"), mb_alpha, x);
    auto add = mm->add_instruction(migraphx::make_op("add"), mb_beta, mul);
    mm->add_instruction(migraphx::make_op("clip"), add, mb_zero, mb_one);

    auto prog = optimize_onnx("hardsigmoid_default_test.onnx");
    EXPECT(p == prog);
}
// Same clip(alpha * x + beta, 0, 1) expansion on a double tensor, with the
// model's explicit attributes alpha = 0.3, beta = 0.7.
TEST_CASE(hardsigmoid_double_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<std::size_t> input_lens{1, 3, 4, 5};
    auto input_type = migraphx::shape::double_type;
    migraphx::shape s{input_type, input_lens};
    auto x = mm->add_parameter("x", s);

    // Attribute values are kept as float; the literal below is built with
    // the double input_type shape.
    float alpha = 0.3;
    float beta  = 0.7;

    auto mb_alpha = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
        mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {alpha}}));
    auto mb_beta = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
        mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {beta}}));
    auto mb_zero =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {0}}));
    auto mb_one =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));

    auto mul = mm->add_instruction(migraphx::make_op("mul"), mb_alpha, x);
    auto add = mm->add_instruction(migraphx::make_op("add"), mb_beta, mul);
    mm->add_instruction(migraphx::make_op("clip"), add, mb_zero, mb_one);

    auto prog = optimize_onnx("hardsigmoid_double_test.onnx");
    EXPECT(p == prog);
}
// Same clip(alpha * x + beta, 0, 1) expansion on an fp16 tensor with the
// default attributes alpha = 0.2, beta = 0.5.
TEST_CASE(hardsigmoid_half_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<std::size_t> input_lens{1, 3, 4, 5};
    auto input_type = migraphx::shape::half_type;
    migraphx::shape s{input_type, input_lens};
    auto x = mm->add_parameter("x", s);

    float alpha = 0.2;
    float beta  = 0.5;

    auto mb_alpha = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
        mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {alpha}}));
    auto mb_beta = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
        mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {beta}}));
    auto mb_zero =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {0}}));
    auto mb_one =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));

    auto mul = mm->add_instruction(migraphx::make_op("mul"), mb_alpha, x);
    auto add = mm->add_instruction(migraphx::make_op("add"), mb_beta, mul);
    mm->add_instruction(migraphx::make_op("clip"), add, mb_zero, mb_one);

    auto prog = optimize_onnx("hardsigmoid_half_test.onnx");
    EXPECT(p == prog);
}
// HardSwish expands to x * clip(alpha * x + beta, 0, 1) with alpha = 1/6
// and beta = 0.5, i.e. x * hardsigmoid(x).
TEST_CASE(hardswish_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<std::size_t> input_lens{2, 5};
    auto input_type = migraphx::shape::float_type;
    migraphx::shape s{input_type, input_lens};
    auto x = mm->add_parameter("x", s);

    float alpha = 1.0 / 6.0;
    float beta  = 0.5;

    auto mb_alpha = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
        mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {alpha}}));
    auto mb_beta = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
        mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {beta}}));
    auto mb_zero =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {0}}));
    auto mb_one =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
                            mm->add_literal(migraphx::literal{migraphx::shape{input_type}, {1}}));

    auto mul         = mm->add_instruction(migraphx::make_op("mul"), mb_alpha, x);
    auto add         = mm->add_instruction(migraphx::make_op("add"), mb_beta, mul);
    auto hardsigmoid = mm->add_instruction(migraphx::make_op("clip"), add, mb_zero, mb_one);
    // Final multiply by the original input completes the swish.
    mm->add_instruction(migraphx::make_op("mul"), x, hardsigmoid);

    auto prog = optimize_onnx("hardswish_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(if_else_test) TEST_CASE(if_else_test)
{ {
migraphx::program p; migraphx::program p;
...@@ -2300,6 +2492,50 @@ TEST_CASE(maxpool_same_upper_test) ...@@ -2300,6 +2492,50 @@ TEST_CASE(maxpool_same_upper_test)
EXPECT(p == prog); EXPECT(p == prog);
} }
// Mean inputs whose shapes cannot broadcast must be rejected at parse time.
TEST_CASE(mean_invalid_broadcast_test)
{
    EXPECT(test::throws([&] { migraphx::parse_onnx("mean_invalid_broadcast_test.onnx"); }));
}
// Mean with a single input parses to the identity: the program simply
// returns the input parameter.
TEST_CASE(mean_single_input_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto data0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 3}});
    mm->add_return({data0});

    auto prog = migraphx::parse_onnx("mean_single_input_test.onnx");
    EXPECT(p == prog);
}
// Expected expansion of Mean over three half inputs: each input is divided
// by the input count (via a broadcast literal), and the quotients are summed.
// NOTE(review): this case is named mean_test but verifies mean_fp16_test.onnx
// -- confirm the intended model file / test name.
TEST_CASE(mean_test)
{
    const std::size_t num_data = 3;
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::half_type, {1, 2, 3}};
    auto data0 = mm->add_parameter("0", s);
    auto data1 = mm->add_parameter("1", s);
    auto data2 = mm->add_parameter("2", s);
    // Scalar literal holding the divisor (number of inputs).
    auto div_lit = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {num_data}});
    auto divisor =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), div_lit);
    auto mean = mm->add_instruction(migraphx::make_op("div"), data0, divisor);
    divisor =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), div_lit);
    data1 = mm->add_instruction(migraphx::make_op("div"), data1, divisor);
    mean  = mm->add_instruction(migraphx::make_op("add"), mean, data1);
    divisor =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), div_lit);
    data2 = mm->add_instruction(migraphx::make_op("div"), data2, divisor);
    mean  = mm->add_instruction(migraphx::make_op("add"), mean, data2);

    auto prog = optimize_onnx("mean_fp16_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(min_test) TEST_CASE(min_test)
{ {
migraphx::program p; migraphx::program p;
...@@ -2348,6 +2584,14 @@ TEST_CASE(multinomial_dtype_error_test) ...@@ -2348,6 +2584,14 @@ TEST_CASE(multinomial_dtype_error_test)
EXPECT(test::throws([&] { migraphx::parse_onnx("multinomial_dtype_error_test.onnx"); })); EXPECT(test::throws([&] { migraphx::parse_onnx("multinomial_dtype_error_test.onnx"); }));
} }
// The model carries no seed attribute, so each parse should generate a fresh
// seed -- two parses of the same file must yield different programs.
TEST_CASE(multinomial_generated_seed_test)
{
    auto p1 = optimize_onnx("multinomial_generated_seed_test.onnx");
    auto p2 = optimize_onnx("multinomial_generated_seed_test.onnx");

    EXPECT(p1 != p2);
}
TEST_CASE(multinomial_int64_test) TEST_CASE(multinomial_int64_test)
{ {
migraphx::program p; migraphx::program p;
...@@ -2404,6 +2648,33 @@ TEST_CASE(neg_test) ...@@ -2404,6 +2648,33 @@ TEST_CASE(neg_test)
EXPECT(p == prog); EXPECT(p == prog);
} }
// NonMaxSuppression parses to the "nonmaxsuppression" op, forwarding all
// five optional inputs and the center_point_box attribute.
TEST_CASE(nms_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();

    migraphx::shape sb{migraphx::shape::float_type, {1, 6, 4}};
    auto b = mm->add_parameter("boxes", sb);

    migraphx::shape ss{migraphx::shape::float_type, {1, 1, 6}};
    auto s = mm->add_parameter("scores", ss);

    migraphx::shape smo{migraphx::shape::int64_type, {1}};
    auto mo = mm->add_parameter("max_output_boxes_per_class", smo);

    migraphx::shape siou{migraphx::shape::float_type, {1}};
    auto iou = mm->add_parameter("iou_threshold", siou);

    migraphx::shape sst{migraphx::shape::float_type, {1}};
    auto st = mm->add_parameter("score_threshold", sst);

    auto ret = mm->add_instruction(
        migraphx::make_op("nonmaxsuppression", {{"center_point_box", 1}}), b, s, mo, iou, st);
    mm->add_return({ret});

    auto prog = migraphx::parse_onnx("nms_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(nonzero_dynamic_test) TEST_CASE(nonzero_dynamic_test)
{ {
migraphx::program p; migraphx::program p;
...@@ -2824,6 +3095,14 @@ TEST_CASE(randomnormal_dtype_error_test) ...@@ -2824,6 +3095,14 @@ TEST_CASE(randomnormal_dtype_error_test)
EXPECT(test::throws([&] { migraphx::parse_onnx("randomnormal_dtype_error_test.onnx"); })); EXPECT(test::throws([&] { migraphx::parse_onnx("randomnormal_dtype_error_test.onnx"); }));
} }
// The model carries no seed attribute, so each parse should generate a fresh
// seed -- two parses of the same file must yield different programs.
TEST_CASE(randomnormal_generated_seed_test)
{
    auto p1 = optimize_onnx("randomnormal_generated_seed_test.onnx");
    auto p2 = optimize_onnx("randomnormal_generated_seed_test.onnx");

    EXPECT(p1 != p2);
}
TEST_CASE(randomnormal_shape_error_test) TEST_CASE(randomnormal_shape_error_test)
{ {
EXPECT(test::throws([&] { migraphx::parse_onnx("randomnormal_shape_error_test.onnx"); })); EXPECT(test::throws([&] { migraphx::parse_onnx("randomnormal_shape_error_test.onnx"); }));
...@@ -2886,6 +3165,14 @@ TEST_CASE(randomuniform_dtype_error_test) ...@@ -2886,6 +3165,14 @@ TEST_CASE(randomuniform_dtype_error_test)
EXPECT(test::throws([&] { migraphx::parse_onnx("randomuniform_dtype_error_test.onnx"); })); EXPECT(test::throws([&] { migraphx::parse_onnx("randomuniform_dtype_error_test.onnx"); }));
} }
// The model carries no seed attribute, so each parse should generate a fresh
// seed -- two parses of the same file must yield different programs.
TEST_CASE(randomuniform_generated_seed_test)
{
    auto p1 = optimize_onnx("randomuniform_generated_seed_test.onnx");
    auto p2 = optimize_onnx("randomuniform_generated_seed_test.onnx");

    EXPECT(p1 != p2);
}
TEST_CASE(randomuniform_shape_error_test) TEST_CASE(randomuniform_shape_error_test)
{ {
EXPECT(test::throws([&] { migraphx::parse_onnx("randomuniform_shape_error_test.onnx"); })); EXPECT(test::throws([&] { migraphx::parse_onnx("randomuniform_shape_error_test.onnx"); }));
...@@ -3356,7 +3643,7 @@ TEST_CASE(resize_nonstd_input_test) ...@@ -3356,7 +3643,7 @@ TEST_CASE(resize_nonstd_input_test)
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(resize_upsample_linear_ac_test) static auto create_upsample_linear_prog()
{ {
migraphx::program p; migraphx::program p;
auto* mm = p.get_main_module(); auto* mm = p.get_main_module();
...@@ -3447,6 +3734,12 @@ TEST_CASE(resize_upsample_linear_ac_test) ...@@ -3447,6 +3734,12 @@ TEST_CASE(resize_upsample_linear_ac_test)
auto add1 = mm->add_instruction(migraphx::make_op("add"), mul1, slc10); auto add1 = mm->add_instruction(migraphx::make_op("add"), mul1, slc10);
mm->add_return({add1}); mm->add_return({add1});
return p;
}
TEST_CASE(resize_upsample_linear_ac_test)
{
auto p = create_upsample_linear_prog();
auto prog = migraphx::parse_onnx("resize_upsample_linear_ac_test.onnx"); auto prog = migraphx::parse_onnx("resize_upsample_linear_ac_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
...@@ -3600,6 +3893,55 @@ TEST_CASE(resize_upsample_pf_test) ...@@ -3600,6 +3893,55 @@ TEST_CASE(resize_upsample_pf_test)
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(roialign_default_test)
{
    // Reference program: roialign with every attribute left at its default.
    migraphx::program p;
    auto* mm = p.get_main_module();

    migraphx::shape feat_shape{migraphx::shape::float_type, {10, 4, 7, 8}};
    migraphx::shape roi_shape{migraphx::shape::float_type, {8, 4}};
    migraphx::shape idx_shape{migraphx::shape::int64_type, {8}};

    auto feat = mm->add_parameter("x", feat_shape);
    auto roi  = mm->add_parameter("rois", roi_shape);
    auto idx  = mm->add_parameter("batch_ind", idx_shape);

    auto out = mm->add_instruction(migraphx::make_op("roialign"), feat, roi, idx);
    mm->add_return({out});

    // The parsed ONNX model must lower to exactly this graph.
    auto prog = migraphx::parse_onnx("roialign_default_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(roialign_test)
{
    // Reference program: roialign with explicit, non-default attributes.
    migraphx::program p;
    auto* mm = p.get_main_module();

    migraphx::shape feat_shape{migraphx::shape::float_type, {10, 5, 4, 7}};
    migraphx::shape roi_shape{migraphx::shape::float_type, {8, 4}};
    migraphx::shape idx_shape{migraphx::shape::int64_type, {8}};

    auto feat = mm->add_parameter("x", feat_shape);
    auto roi  = mm->add_parameter("rois", roi_shape);
    auto idx  = mm->add_parameter("batch_ind", idx_shape);

    // Build the operator first so the attribute list reads as one unit.
    auto roialign_op = migraphx::make_op("roialign",
                                         {{"coordinate_transformation_mode", "output_half_pixel"},
                                          {"spatial_scale", 2.0f},
                                          {"output_height", 5},
                                          {"output_width", 5},
                                          {"sampling_ratio", 3}});
    auto out         = mm->add_instruction(roialign_op, feat, roi, idx);
    mm->add_return({out});

    auto prog = migraphx::parse_onnx("roialign_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(round_test) TEST_CASE(round_test)
{ {
migraphx::program p; migraphx::program p;
...@@ -3856,6 +4198,86 @@ TEST_CASE(softmax_nonstd_input_test) ...@@ -3856,6 +4198,86 @@ TEST_CASE(softmax_nonstd_input_test)
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(softplus_test)
{
    // Softplus(x) = log(exp(x) + 1); build the expected decomposition for a
    // 1-D float input and compare against the optimized parse.
    migraphx::program p;
    auto* mm = p.get_main_module();

    const std::vector<std::size_t> dims{5};
    const auto dtype = migraphx::shape::float_type;

    auto x = mm->add_parameter("x", migraphx::shape{dtype, dims});

    // A scalar literal 1 broadcast to the input shape.
    auto one     = mm->add_literal(migraphx::literal{migraphx::shape{dtype}, {1}});
    auto one_bcast =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), one);

    auto e   = mm->add_instruction(migraphx::make_op("exp"), x);
    auto sum = mm->add_instruction(migraphx::make_op("add"), e, one_bcast);
    mm->add_instruction(migraphx::make_op("log"), sum);

    auto prog = optimize_onnx("softplus_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(softplus_nd_test)
{
    // Same Softplus decomposition as softplus_test, but for a 3-D half input.
    migraphx::program p;
    auto* mm = p.get_main_module();

    const std::vector<std::size_t> dims{3, 4, 5};
    const auto dtype = migraphx::shape::half_type;

    auto x = mm->add_parameter("x", migraphx::shape{dtype, dims});

    auto one     = mm->add_literal(migraphx::literal{migraphx::shape{dtype}, {1}});
    auto one_bcast =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), one);

    auto e   = mm->add_instruction(migraphx::make_op("exp"), x);
    auto sum = mm->add_instruction(migraphx::make_op("add"), e, one_bcast);
    mm->add_instruction(migraphx::make_op("log"), sum);

    auto prog = optimize_onnx("softplus_nd_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(softsign_test)
{
    // Softsign(x) = x / (|x| + 1); build the expected decomposition for a
    // 1-D float input and compare against the optimized parse.
    migraphx::program p;
    auto* mm = p.get_main_module();

    const std::vector<std::size_t> dims{5};
    const auto dtype = migraphx::shape::float_type;

    auto x = mm->add_parameter("x", migraphx::shape{dtype, dims});

    auto one     = mm->add_literal(migraphx::literal{migraphx::shape{dtype}, {1}});
    auto one_bcast =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), one);

    auto mag   = mm->add_instruction(migraphx::make_op("abs"), x);
    auto denom = mm->add_instruction(migraphx::make_op("add"), mag, one_bcast);
    mm->add_instruction(migraphx::make_op("div"), x, denom);

    auto prog = optimize_onnx("softsign_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(softsign_nd_test)
{
    // Same Softsign decomposition as softsign_test, but for a 3-D half input.
    migraphx::program p;
    auto* mm = p.get_main_module();

    const std::vector<std::size_t> dims{3, 4, 5};
    const auto dtype = migraphx::shape::half_type;

    auto x = mm->add_parameter("x", migraphx::shape{dtype, dims});

    auto one     = mm->add_literal(migraphx::literal{migraphx::shape{dtype}, {1}});
    auto one_bcast =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), one);

    auto mag   = mm->add_instruction(migraphx::make_op("abs"), x);
    auto denom = mm->add_instruction(migraphx::make_op("add"), mag, one_bcast);
    mm->add_instruction(migraphx::make_op("div"), x, denom);

    auto prog = optimize_onnx("softsign_nd_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(split_minus_axis_test) TEST_CASE(split_minus_axis_test)
{ {
migraphx::program p; migraphx::program p;
...@@ -4337,6 +4759,13 @@ TEST_CASE(unknown_test_throw) ...@@ -4337,6 +4759,13 @@ TEST_CASE(unknown_test_throw)
EXPECT(test::throws([&] { migraphx::parse_onnx("unknown_test.onnx"); })); EXPECT(test::throws([&] { migraphx::parse_onnx("unknown_test.onnx"); }));
} }
TEST_CASE(upsample_linear_test)
{
    // Linear Upsample reuses the same reference graph as the linear Resize
    // test (shared builder), so both models must parse to identical programs.
    auto expected = create_upsample_linear_prog();
    auto parsed   = migraphx::parse_onnx("upsample_linear_test.onnx");
    EXPECT(expected == parsed);
}
TEST_CASE(upsample_test) TEST_CASE(upsample_test)
{ {
migraphx::program p; migraphx::program p;
......
 randomnormal_generated_seed_test:
1
inputoutput" RandomNormal*
sample_size
 randomnormal_generated_seed_testZ
input



b
output



B
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment