Commit 264a7647 authored by Brian Pickrell's avatar Brian Pickrell
Browse files

Merge branch 'develop' into multinomial_parse_merge

parents d99729f8 8e18544f
deconv_output_padding_3d_test:
%conv_transpose_output_padding_3d_test:
G
x
wy" ConvTranspose*
output_padding@@@*
strides@@@deconv_output_padding_3d_testZ
strides@@@%conv_transpose_output_padding_3d_testZ
x


......@@ -25,4 +25,4 @@ G


B
\ No newline at end of file
B
\ No newline at end of file
deconv_output_padding_test:
"conv_transpose_output_padding_test:
C
x
wy" ConvTranspose*
output_padding@@*
strides@@deconv_output_padding_testZ
strides@@"conv_transpose_output_padding_testZ
x


......@@ -22,4 +22,4 @@ C


B
B
\ No newline at end of file
deconv_output_shape_3d_test:
#conv_transpose_output_shape_3d_test:
E
x
wy" ConvTranspose*
output_shape@
@@*
strides@@@deconv_output_shape_3d_testZ
strides@@@#conv_transpose_output_shape_3d_testZ
x


......@@ -26,4 +26,4 @@ E


B
\ No newline at end of file
B
\ No newline at end of file
deconv_output_shape_test:
 conv_transpose_output_shape_test:
A
x
wy" ConvTranspose*
output_shape@
@*
strides@@deconv_output_shape_testZ
strides@@ conv_transpose_output_shape_testZ
x


......@@ -23,4 +23,4 @@ A


B
B
\ No newline at end of file
deconv_input_pads_test:®
=
conv_transpose_stride_test:
*
x
wy" ConvTranspose*
pads@@@@ *
strides@@ deconv_input_pads_testZ
wy" ConvTranspose*
strides@@conv_transpose_stride_testZ
x


......@@ -21,4 +20,4 @@



B
B
\ No newline at end of file
 deconv_test:
conv_transpose_test:

x
wyconv1" ConvTranspose deconv_testZ
wyconv1" ConvTransposeconv_transpose_testZ
x


......@@ -19,4 +19,4 @@



B
B
\ No newline at end of file
......@@ -1376,7 +1376,7 @@ def cosh_test():
@onnx_test()
def deconv_test():
def conv_transpose_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 1, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
......@@ -1390,7 +1390,7 @@ def deconv_test():
@onnx_test()
def deconv_bias_test():
def conv_transpose_bias_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 1, 3, 3])
b = helper.make_tensor_value_info('b', TensorProto.FLOAT, [1])
......@@ -1405,7 +1405,7 @@ def deconv_bias_test():
@onnx_test()
def deconv_input_pads_strides_test():
def conv_transpose_input_pads_strides_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 7, 5])
......@@ -1420,7 +1420,7 @@ def deconv_input_pads_strides_test():
@onnx_test()
def deconv_input_pads_asymm_test():
def conv_transpose_input_pads_asymm_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 8, 6])
......@@ -1435,7 +1435,7 @@ def deconv_input_pads_asymm_test():
@onnx_test()
def deconv_input_pads_asymm_1d_test():
def conv_transpose_input_pads_asymm_1d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 6])
......@@ -1451,7 +1451,7 @@ def deconv_input_pads_asymm_1d_test():
@onnx_test()
def deconv_output_padding_test():
def conv_transpose_output_padding_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8])
......@@ -1466,7 +1466,7 @@ def deconv_output_padding_test():
@onnx_test()
def deconv_output_padding_3d_test():
def conv_transpose_output_padding_3d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3, 3])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8, 8])
......@@ -1481,7 +1481,7 @@ def deconv_output_padding_3d_test():
@onnx_test()
def deconv_output_shape_test():
def conv_transpose_output_shape_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8])
......@@ -1496,7 +1496,7 @@ def deconv_output_shape_test():
@onnx_test()
def deconv_output_shape_3d_test():
def conv_transpose_output_shape_3d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3, 3])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8, 8])
......@@ -1511,7 +1511,7 @@ def deconv_output_shape_3d_test():
@onnx_test()
def deconv_stride_test():
def conv_transpose_stride_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 7, 3])
......@@ -1524,6 +1524,81 @@ def deconv_stride_test():
return ([node], [x, w], [y])
@onnx_test()
def conv_transpose_auto_pad_test():
    """ConvTranspose node carrying an auto_pad='SAME_UPPER' attribute."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 1, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 3, 3])
    node = onnx.helper.make_node('ConvTranspose',
                                 name='conv1',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 auto_pad='SAME_UPPER')
    return ([node], [x_info, w_info], [y_info])
@onnx_test()
def conv_transpose_dyn_asym_padding_test():
    """ConvTranspose with a dynamic batch (None) and asymmetric pads."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 8, 6])
    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 pads=[0, 0, 1, 1])
    return ([node], [x_info, w_info], [y_info])
@onnx_test()
def conv_transpose_dyn_output_shape_test():
    """ConvTranspose with a dynamic batch (None) and an output_shape attribute."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 2, 10, 8])
    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 output_shape=[10, 8])
    return ([node], [x_info, w_info], [y_info])
@onnx_test()
def conv_transpose_dyn_batch_test():
    """Plain ConvTranspose whose only dynamic dimension is the batch."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 3, 3])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 1, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 5, 5])
    node = onnx.helper.make_node('ConvTranspose',
                                 name='conv1',
                                 inputs=['x', 'w'],
                                 outputs=['y'])
    return ([node], [x_info, w_info], [y_info])
@onnx_test()
def conv_transpose_dyn_img_test():
    """Plain ConvTranspose with dynamic spatial (image) dimensions."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                           [1, 1, None, None])
    w_info = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 1, 3, 3])
    y_info = helper.make_tensor_value_info('y', TensorProto.FLOAT,
                                           [1, 1, None, None])
    node = onnx.helper.make_node('ConvTranspose',
                                 name='conv1',
                                 inputs=['x', 'w'],
                                 outputs=['y'])
    return ([node], [x_info, w_info], [y_info])
@onnx_test()
def depthtospace_test():
......@@ -6107,6 +6182,101 @@ def shape_test():
return ([node], [x], [y])
@onnx_test()
def shape_dyn_test0():
    """Shape op on a dynamically-shaped rank-4 input; no start/end attributes."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                           [None, 4, None, None])
    y_info = helper.make_tensor_value_info('y', TensorProto.INT64, [4])
    node = onnx.helper.make_node('Shape', inputs=['x'], outputs=['y'])
    return ([node], [x_info], [y_info])
@onnx_test()
def shape_dyn_test1():
    """Shape op on a dynamic input with start=2 (last two dimensions)."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                           [None, 4, None, None])
    y_info = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    node = onnx.helper.make_node('Shape',
                                 inputs=['x'],
                                 outputs=['y'],
                                 start=2)
    return ([node], [x_info], [y_info])
@onnx_test()
def shape_dyn_test2():
    """Shape op on a dynamic input with a negative start (start=-2)."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                           [None, 4, None, None])
    y_info = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    node = onnx.helper.make_node('Shape', inputs=['x'], outputs=['y'], start=-2)
    return ([node], [x_info], [y_info])
@onnx_test()
def shape_dyn_test3():
    """Shape op on a dynamic input with both start=1 and end=2."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                           [None, 4, None, None])
    y_info = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    node = onnx.helper.make_node('Shape',
                                 inputs=['x'],
                                 outputs=['y'],
                                 start=1,
                                 end=2)
    return ([node], [x_info], [y_info])
@onnx_test()
def shape_end_oob_test():
    """Shape op with an out-of-bounds end (end=5 on a rank-4 input)."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                           [None, 4, None, None])
    y_info = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    node = onnx.helper.make_node('Shape',
                                 inputs=['x'],
                                 outputs=['y'],
                                 end=5)
    return ([node], [x_info], [y_info])
@onnx_test()
def shape_start_oob_test():
    """Shape op with an out-of-bounds negative start (start=-6, rank 4)."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                           [None, 4, None, None])
    y_info = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    node = onnx.helper.make_node('Shape', inputs=['x'], outputs=['y'], start=-6)
    return ([node], [x_info], [y_info])
@onnx_test()
def shape_end_less_start_error():
    """Shape op with end < start (start=3, end=1) — invalid slice of the shape."""
    x_info = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                           [None, 4, None, None])
    y_info = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    node = onnx.helper.make_node('Shape',
                                 inputs=['x'],
                                 outputs=['y'],
                                 start=3,
                                 end=1)
    return ([node], [x_info], [y_info])
@onnx_test()
def shape_gather_test():
values = np.array([1])
......
......@@ -440,14 +440,13 @@ TEST_CASE(batch_norm_flat_test)
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {1}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {1}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, scale});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), {var_eps});
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, bias});
auto prog = optimize_onnx("batch_norm_flat_test.onnx");
......@@ -465,14 +464,13 @@ TEST_CASE(batch_norm_rank_2_test)
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {5}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {5}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, scale});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), {var_eps});
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, bias});
auto prog = optimize_onnx("batch_norm_rank_2_test.onnx");
......@@ -490,7 +488,6 @@ TEST_CASE(batch_norm_1d_test)
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {3}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {3}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-5f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), scale);
......@@ -498,11 +495,11 @@ TEST_CASE(batch_norm_1d_test)
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_onnx("batch_norm_1d_test.onnx");
......@@ -520,7 +517,6 @@ TEST_CASE(batch_norm_2d_test)
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {3}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {3}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
......@@ -528,11 +524,11 @@ TEST_CASE(batch_norm_2d_test)
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_onnx("batch_norm_2d_test.onnx");
......@@ -550,7 +546,6 @@ TEST_CASE(batch_norm_3d_test)
auto mean = mm->add_parameter("mean", {migraphx::shape::half_type, {2}});
auto var = mm->add_parameter("variance", {migraphx::shape::half_type, {2}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-6f}});
auto usq_scale =
......@@ -561,12 +556,13 @@ TEST_CASE(batch_norm_3d_test)
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_onnx("batch_norm_3d_test.onnx");
EXPECT(p == prog);
......@@ -908,7 +904,6 @@ TEST_CASE(constant_test)
TEST_CASE(constant_fill_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
......@@ -1105,7 +1100,6 @@ TEST_CASE(conv_bn_relu_maxpool_test)
auto p5 = mm->add_parameter("5", {migraphx::shape::float_type, {1}});
auto p6 = mm->add_parameter("6", {migraphx::shape::float_type, {1}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
uint64_t axis = 1;
......@@ -1120,25 +1114,12 @@ TEST_CASE(conv_bn_relu_maxpool_test)
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p5);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p6);
auto mb_mean = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_mean);
auto numer = mm->add_instruction(migraphx::make_op("sub"), l5, mb_mean);
auto mb_eps =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1}}}), eps);
auto var_eps = mm->add_instruction(migraphx::make_op("add"), usq_var, mb_eps);
auto mb_rt =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1}}}), rt);
auto denom = mm->add_instruction(migraphx::make_op("pow"), var_eps, mb_rt);
auto mb_denom = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), denom);
auto div0 = mm->add_instruction(migraphx::make_op("div"), numer, mb_denom);
auto mb_scale = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_scale);
auto r0 = mm->add_instruction(migraphx::make_op("mul"), div0, mb_scale);
auto mb_bias = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_bias);
auto l6 = mm->add_instruction(migraphx::make_op("add"), r0, mb_bias);
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {l5, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
auto l6 = add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto l7 = mm->add_instruction(migraphx::make_op("relu"), l6);
mm->add_instruction(migraphx::make_op("pooling",
......@@ -1432,19 +1413,19 @@ TEST_CASE(cosh_test)
EXPECT(p == prog);
}
TEST_CASE(deconv_test)
TEST_CASE(conv_transpose_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 1, 3, 3}});
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 1, 3, 3}});
mm->add_instruction(migraphx::make_op("deconvolution"), l0, l1);
mm->add_instruction(migraphx::make_op("convolution_backwards"), l0, l1);
auto prog = optimize_onnx("deconv_test.onnx");
auto prog = optimize_onnx("conv_transpose_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(deconv_bias_test)
TEST_CASE(conv_transpose_bias_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
......@@ -1452,120 +1433,181 @@ TEST_CASE(deconv_bias_test)
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 1, 3, 3}});
auto l2 = mm->add_parameter("b", {migraphx::shape::float_type, {1}});
uint64_t axis = 1;
auto l3 = mm->add_instruction(migraphx::make_op("deconvolution"), l0, l1);
auto l3 = mm->add_instruction(migraphx::make_op("convolution_backwards"), l0, l1);
auto l4 = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", axis}, {"out_lens", l3->get_shape().lens()}}), l2);
mm->add_instruction(migraphx::make_op("add"), l3, l4);
auto prog = optimize_onnx("deconv_bias_test.onnx");
auto prog = optimize_onnx("conv_transpose_bias_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(deconv_input_pads_strides_test)
TEST_CASE(conv_transpose_input_pads_strides_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 1, 3, 3}});
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3, 3}});
mm->add_instruction(
migraphx::make_op("deconvolution", {{"padding", {1, 1}}, {"stride", {3, 2}}}), l0, l1);
migraphx::make_op("convolution_backwards", {{"padding", {1, 1}}, {"stride", {3, 2}}}),
l0,
l1);
auto prog = optimize_onnx("deconv_input_pads_strides_test.onnx");
auto prog = optimize_onnx("conv_transpose_input_pads_strides_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(deconv_input_pads_asymm_test)
TEST_CASE(conv_transpose_input_pads_asymm_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 1, 3, 3}});
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("deconvolution", {{"padding", {0, 0}}, {"stride", {3, 2}}}), l0, l1);
migraphx::make_op("convolution_backwards", {{"padding", {0, 0}}, {"stride", {3, 2}}}),
l0,
l1);
mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {0, 0}}, {"ends", {8, 6}}}), l2);
auto prog = optimize_onnx("deconv_input_pads_asymm_test.onnx");
auto prog = optimize_onnx("conv_transpose_input_pads_asymm_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(deconv_input_pads_asymm_1d_test)
TEST_CASE(conv_transpose_input_pads_asymm_1d_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 1, 3}});
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("deconvolution",
{{"padding", {0, 0}}, {"stride", {2}}, {"dilation", {1}}}),
migraphx::make_op("convolution_backwards",
{{"padding", {0}}, {"stride", {2}}, {"dilation", {1}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {6}}}),
l2);
auto prog = optimize_onnx("deconv_input_pads_asymm_1d_test.onnx");
auto prog = optimize_onnx("conv_transpose_input_pads_asymm_1d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(deconv_output_padding_test)
TEST_CASE(conv_transpose_output_padding_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 1, 3, 3}});
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("deconvolution", {{"padding", {0, 0}}, {"stride", {3, 2}}}), l0, l1);
migraphx::make_op("convolution_backwards", {{"padding", {0, 0}}, {"stride", {3, 2}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 0, 0, 0, 0, 0, 1, 1}}}), l2);
auto prog = optimize_onnx("deconv_output_padding_test.onnx");
auto prog = optimize_onnx("conv_transpose_output_padding_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(deconv_output_padding_3d_test)
TEST_CASE(conv_transpose_output_padding_3d_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 1, 3, 3, 3}});
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3, 3, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("deconvolution",
migraphx::make_op("convolution_backwards",
{{"padding", {0, 0, 0}}, {"stride", {3, 2, 2}}, {"dilation", {1, 1, 1}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 0, 0, 0, 0, 0, 0, 1, 1, 1}}}), l2);
auto prog = optimize_onnx("deconv_output_padding_3d_test.onnx");
auto prog = optimize_onnx("conv_transpose_output_padding_3d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(deconv_output_shape_test)
TEST_CASE(conv_transpose_output_shape_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 1, 3, 3}});
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("deconvolution", {{"padding", {0, 0}}, {"stride", {3, 2}}}), l0, l1);
migraphx::make_op("convolution_backwards", {{"padding", {0, 0}}, {"stride", {3, 2}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 0, 0, 0, 0, 0, 1, 1}}}), l2);
auto prog = optimize_onnx("deconv_output_shape_test.onnx");
auto prog = optimize_onnx("conv_transpose_output_shape_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(deconv_output_shape_3d_test)
TEST_CASE(conv_transpose_output_shape_3d_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("x", {migraphx::shape::float_type, {1, 1, 3, 3, 3}});
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3, 3, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("deconvolution",
migraphx::make_op("convolution_backwards",
{{"padding", {0, 0, 0}}, {"stride", {3, 2, 2}}, {"dilation", {1, 1, 1}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 0, 0, 0, 0, 0, 0, 1, 1, 1}}}), l2);
auto prog = optimize_onnx("deconv_output_shape_3d_test.onnx");
auto prog = optimize_onnx("conv_transpose_output_shape_3d_test.onnx");
EXPECT(p == prog);
}
// A ConvTranspose carrying an auto_pad attribute is rejected by the parser.
TEST_CASE(conv_transpose_auto_pad_error)
{
    auto parse_it = [&] { migraphx::parse_onnx("conv_transpose_auto_pad_test.onnx"); };
    EXPECT(test::throws(parse_it));
}
// Asymmetric padding combined with a dynamic input dimension must throw.
TEST_CASE(conv_transpose_dyn_asym_padding_error)
{
    migraphx::onnx_options opts;
    opts.default_dyn_dim_value = {1, 4};
    auto parse_it = [&] {
        migraphx::parse_onnx("conv_transpose_dyn_asym_padding_test.onnx", opts);
    };
    EXPECT(test::throws(parse_it));
}
// The output_shape attribute combined with a dynamic input dimension must throw.
TEST_CASE(conv_transpose_dyn_output_shape_error)
{
    migraphx::onnx_options opts;
    opts.default_dyn_dim_value = {1, 4};
    auto parse_it = [&] {
        migraphx::parse_onnx("conv_transpose_dyn_output_shape_test.onnx", opts);
    };
    EXPECT(test::throws(parse_it));
}
// ConvTranspose with a dynamic batch dimension parses to convolution_backwards,
// with the batch carried as the dynamic dimension {1, 4}.
TEST_CASE(conv_transpose_dyn_batch_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape x_shape{migraphx::shape::float_type, {{1, 4}, {1, 1}, {3, 3}, {3, 3}}};
    auto x = mm->add_parameter("x", x_shape);
    auto w = mm->add_parameter("w", {migraphx::shape::float_type, {1, 1, 3, 3}});
    auto r = mm->add_instruction(migraphx::make_op("convolution_backwards"), x, w);
    mm->add_return({r});

    migraphx::onnx_options opts;
    opts.default_dyn_dim_value = {1, 4};
    auto prog = parse_onnx("conv_transpose_dyn_batch_test.onnx", opts);
    EXPECT(p == prog);
}
// ConvTranspose with dynamic spatial dimensions parses to convolution_backwards,
// with the image dimensions carried as the dynamic dimension {3, 6}.
TEST_CASE(conv_transpose_dyn_img_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape x_shape{migraphx::shape::float_type, {{1, 1}, {1, 1}, {3, 6}, {3, 6}}};
    auto x = mm->add_parameter("x", x_shape);
    auto w = mm->add_parameter("w", {migraphx::shape::float_type, {1, 1, 3, 3}});
    auto r = mm->add_instruction(migraphx::make_op("convolution_backwards"), x, w);
    mm->add_return({r});

    migraphx::onnx_options opts;
    opts.default_dyn_dim_value = {3, 6};
    auto prog = parse_onnx("conv_transpose_dyn_img_test.onnx", opts);
    EXPECT(p == prog);
}
......@@ -6054,6 +6096,118 @@ TEST_CASE(shape_test)
EXPECT(p == prog);
}
// Shape on a dynamic rank-4 input with no start/end: parses to dimensions_of
// over all 4 dimensions. (Removed the unused local `s_shape`.)
TEST_CASE(shape_dyn_test0)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0  = mm->add_parameter("x", s);
    auto ret = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 4}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = parse_onnx("shape_dyn_test0.onnx", options);
    EXPECT(p == prog);
}
// Shape with start=2 on a dynamic rank-4 input: dimensions_of over [2, 4).
// (Removed the unused local `s_shape`.)
TEST_CASE(shape_dyn_test1)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0 = mm->add_parameter("x", s);
    auto ret =
        mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 2}, {"end", 4}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = parse_onnx("shape_dyn_test1.onnx", options);
    EXPECT(p == prog);
}
// Shape with a negative start (start=-2 in the model) is normalized to
// dimensions_of over [2, 4). (Removed the unused local `s_shape`.)
TEST_CASE(shape_dyn_test2)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0 = mm->add_parameter("x", s);
    auto ret =
        mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 2}, {"end", 4}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = parse_onnx("shape_dyn_test2.onnx", options);
    EXPECT(p == prog);
}
// Shape with start=1 and end=2: dimensions_of over the single dimension [1, 2).
// (Removed the unused local `s_shape`.)
TEST_CASE(shape_dyn_test3)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0 = mm->add_parameter("x", s);
    auto ret =
        mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 2}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = parse_onnx("shape_dyn_test3.onnx", options);
    EXPECT(p == prog);
}
// Shape whose end attribute (5 in the model) exceeds the rank-4 input: the
// parser clamps end to the rank, yielding dimensions_of over all 4 dims.
// (Removed the unused local `s_shape`.)
TEST_CASE(shape_end_oob_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0  = mm->add_parameter("x", s);
    auto ret = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 4}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = migraphx::parse_onnx("shape_end_oob_test.onnx", options);
    EXPECT(p == prog);
}
// Shape whose start attribute (-6 in the model) underflows the rank-4 input:
// the parser clamps start to 0, yielding dimensions_of over all 4 dims.
// (Removed the unused local `s_shape`.)
TEST_CASE(shape_start_oob_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0  = mm->add_parameter("x", s);
    auto ret = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 4}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog = migraphx::parse_onnx("shape_start_oob_test.onnx", options);
    EXPECT(p == prog);
}
// Shape with end < start (start=3, end=1 in the model) must fail to parse.
TEST_CASE(shape_end_less_start_error)
{
    migraphx::onnx_options opts;
    opts.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto parse_it = [&] { migraphx::parse_onnx("shape_end_less_start_error.onnx", opts); };
    EXPECT(test::throws(parse_it));
}
TEST_CASE(shape_gather_test)
{
migraphx::program p;
......@@ -7125,7 +7279,8 @@ TEST_CASE(variable_batch_user_input_test6)
TEST_CASE(variable_batch_user_input_test7)
{
// if entry in map_dyn_input_dims is all fixed dynamic_dimensions, convert it to a static shape
// if entry in map_dyn_input_dims is all fixed dynamic_dimensions, convert it to a static
// shape
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 16, 16}});
......
No preview for this file type
......@@ -48,7 +48,7 @@ TEST_CASE(averagepool_notset_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {12};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(averagepool_nt_cip_test)
......@@ -66,7 +66,7 @@ TEST_CASE(averagepool_nt_cip_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {8.33333};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_flat_test)
......@@ -112,7 +112,7 @@ TEST_CASE(batch_norm_flat_test)
0.43305403,
0.4408022,
0.42019472};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_rank_2_test)
......@@ -149,7 +149,7 @@ TEST_CASE(batch_norm_rank_2_test)
9.89948504,
9.89948504,
12.72790933};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_1d_test)
......@@ -185,7 +185,7 @@ TEST_CASE(batch_norm_1d_test)
0.4927, 0.771, -1.956, -2.123, -0.664, -0.583, -0.7207, -0.5127};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_2d_test)
......@@ -251,7 +251,7 @@ TEST_CASE(batch_norm_2d_test)
-2.76707697e+00, 1.47579327e+01, 4.94736385e+00, 2.68847847e+01, -6.49254417e+00,
1.94286156e+00, -7.19223642e+00, -3.70413971e+00, -4.04303551e-01, -1.01827660e+01,
1.49476433e+00};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(batch_norm_3d_test)
......@@ -293,7 +293,7 @@ TEST_CASE(batch_norm_3d_test)
6.098, 11.03, 2.81, 2.81, 2.81, 12.125, 3.143, 8.53, 17.52, 4.938, 15.71,
1.347, 4.938, 1.167, 6.098, 12.67, 12.67, 4.453, 4.453, -0.4768, 12.67};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(celu_verify_test)
......@@ -315,7 +315,7 @@ TEST_CASE(celu_verify_test)
std::transform(data.begin(), data.end(), correct.begin(), [&](auto x) {
return std::max(0.0f, x) + std::min(0.0f, alpha * std::expm1(x / alpha));
});
EXPECT(migraphx::verify_range(result_vector, correct));
EXPECT(migraphx::verify::verify_range(result_vector, correct));
}
TEST_CASE(clip_args_type_mismatch)
......@@ -331,7 +331,7 @@ TEST_CASE(clip_args_type_mismatch)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.5, 2, 2, 1.9, 2.5, 3, 2.9, 3.2, 3.7};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(depthtospace_simple_test)
......@@ -349,7 +349,7 @@ TEST_CASE(depthtospace_simple_test)
std::vector<float> gold = {0, 12, 1, 13, 2, 14, 24, 36, 25, 37, 26, 38, 3, 15, 4, 16,
5, 17, 27, 39, 28, 40, 29, 41, 6, 18, 7, 19, 8, 20, 30, 42,
31, 43, 32, 44, 9, 21, 10, 22, 11, 23, 33, 45, 34, 46, 35, 47};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(spacetodepth_simple_test)
......@@ -367,7 +367,7 @@ TEST_CASE(spacetodepth_simple_test)
std::vector<float> gold = {0, 2, 4, 12, 14, 16, 24, 26, 28, 36, 38, 40, 1, 3, 5, 13,
15, 17, 25, 27, 29, 37, 39, 41, 6, 8, 10, 18, 20, 22, 30, 32,
34, 42, 44, 46, 7, 9, 11, 19, 21, 23, 31, 33, 35, 43, 45, 47};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(spacetodepth_depthtospace_test)
......@@ -389,7 +389,7 @@ TEST_CASE(spacetodepth_depthtospace_test)
auto result2 = p2.eval(pp2).back();
std::vector<float> result_vector2;
result2.visit([&](auto output) { result_vector2.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(result_vector2, data_in));
EXPECT(migraphx::verify::verify_range(result_vector2, data_in));
}
TEST_CASE(eyelike_verify_test)
......@@ -407,7 +407,7 @@ TEST_CASE(eyelike_verify_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> eyelike_mat = {0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.};
EXPECT(migraphx::verify_range(result_vector, eyelike_mat));
EXPECT(migraphx::verify::verify_range(result_vector, eyelike_mat));
}
TEST_CASE(eyelike_verify_negk_test)
......@@ -425,7 +425,7 @@ TEST_CASE(eyelike_verify_negk_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> eyelike_mat = {0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.};
EXPECT(migraphx::verify_range(result_vector, eyelike_mat));
EXPECT(migraphx::verify::verify_range(result_vector, eyelike_mat));
}
TEST_CASE(gather_elements)
......@@ -448,7 +448,7 @@ TEST_CASE(gather_elements)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-0.125, 0.5625, -0.9375, 0.25, 0.5625, 0.9375};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(gemm_test)
......@@ -492,7 +492,7 @@ TEST_CASE(gemm_test)
0.8098607, 1.2157929, 1.1010075, 1.0706307, 1.0429881, 1.1771785, 1.2362702,
0.8239243, 1.1112559, 0.9639262, 1.0813537, 0.8825792, 1.121141, 1.1885703,
1.2227502, 1.4568202, 1.1388762, 1.55058, 1.0958102, 1.4637487, 1.5756242};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(gemm_half_test)
......@@ -536,7 +536,7 @@ TEST_CASE(gemm_half_test)
2.143, 2.062, 1.921, 1.836, 2.203, 1.952, 1.055, 1.225, 1.418, 1.209, 1.155,
1.42, 1.234, 1.302, 1.593, 1.368, 1.289, 1.327, 1.451, 1.394};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(greaterorequal_test)
......@@ -557,7 +557,7 @@ TEST_CASE(greaterorequal_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0, 1.0, 0.0};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(hardsigmoid_verify_test)
......@@ -581,7 +581,7 @@ TEST_CASE(hardsigmoid_verify_test)
std::transform(data.begin(), data.end(), gold.begin(), [&](auto x) {
return std::max(0.0f, std::min(x * alpha + beta, 1.0f));
});
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(if_else_test)
......@@ -603,7 +603,7 @@ TEST_CASE(if_else_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0866565, -0.371067, 0.017719, 0.0250614, 0.0612539, -0.744683};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(if_else_test_inlined)
......@@ -622,7 +622,7 @@ TEST_CASE(if_else_test_inlined)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0507132, -0.712328, 0.0105797, 0.04569, 0.0185013, -1.16472};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(if_then_test)
......@@ -645,7 +645,7 @@ TEST_CASE(if_then_test)
// onnx adds ones so result should be just + 1.0
std::vector<float> gold = {1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(if_then_test_inlined)
......@@ -664,7 +664,7 @@ TEST_CASE(if_then_test_inlined)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(if_literal_test)
......@@ -689,14 +689,14 @@ TEST_CASE(if_literal_test)
{
auto result_vector = run_prog(true);
std::vector<float> gold = {1, 2, 3, 4, 5};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
// else branch
{
auto result_vector = run_prog(false);
std::vector<float> gold = {5, 4, 3, 2, 1};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
}
......@@ -727,7 +727,7 @@ TEST_CASE(if_then_else_multi_output_shapes_inlined_test)
std::vector<float> gold = {
1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375, 0.125, 1.50, -0.125, 0.250, -0.250, -1.125};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(if_then_else_multi_output_shapes_test)
......@@ -758,7 +758,7 @@ TEST_CASE(if_then_else_multi_output_shapes_test)
std::vector<float> gold = {
1.0625, 1.75, 0.9375, 1.125, 0.875, 0.4375, 0.125, 1.50, -0.125, 0.250, -0.250, -1.125};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(if_pl_test)
......@@ -790,14 +790,14 @@ TEST_CASE(if_pl_test)
{
auto result_vector = run_prog(true);
std::vector<float> gold = {2, 3, 4, 5, 6, 7};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
// else branch
{
auto result_vector = run_prog(false);
std::vector<float> gold = {1, 2, 3, 4, 5, 6};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
}
......@@ -836,8 +836,8 @@ TEST_CASE(if_tuple_test)
auto results = run_prog(true);
std::vector<float> gold0(4, 2.0f);
std::vector<float> gold1(12, 4.0f);
EXPECT(migraphx::verify_range(results.at(0), gold0));
EXPECT(migraphx::verify_range(results.at(1), gold1));
EXPECT(migraphx::verify::verify_range(results.at(0), gold0));
EXPECT(migraphx::verify::verify_range(results.at(1), gold1));
}
// else branch
......@@ -845,8 +845,8 @@ TEST_CASE(if_tuple_test)
auto results = run_prog(false);
std::vector<float> gold0(4, 3.0f);
std::vector<float> gold1(12, 5.0f);
EXPECT(migraphx::verify_range(results.at(0), gold0));
EXPECT(migraphx::verify_range(results.at(1), gold1));
EXPECT(migraphx::verify::verify_range(results.at(0), gold0));
EXPECT(migraphx::verify::verify_range(results.at(1), gold1));
}
}
......@@ -877,7 +877,7 @@ TEST_CASE(instance_norm_test)
2.54919,
3.32379,
4.09838};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(instance_norm_dyn_batch_test)
......@@ -919,7 +919,7 @@ TEST_CASE(instance_norm_dyn_batch_test)
2.54919,
3.32379,
4.09838};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(instance_norm_3d_test)
......@@ -948,7 +948,7 @@ TEST_CASE(instance_norm_3d_test)
3.18218,
4.05505};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(lessorequal_test)
......@@ -969,7 +969,7 @@ TEST_CASE(lessorequal_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1, 0, 1};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(lpnormalization_1norm)
......@@ -997,7 +997,7 @@ TEST_CASE(lpnormalization_1norm)
3.f / 7.f,
0.f,
0.f};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(lpnormalization_2norm)
......@@ -1025,7 +1025,7 @@ TEST_CASE(lpnormalization_2norm)
3.f / 5.f,
0.f,
0.f};
EXPECT(migraphx::verify_range(result_vector, correct));
EXPECT(migraphx::verify::verify_range(result_vector, correct));
}
TEST_CASE(mean_broadcast_test)
......@@ -1056,7 +1056,7 @@ TEST_CASE(mean_broadcast_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold(24, 3);
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(mean_test)
......@@ -1083,7 +1083,7 @@ TEST_CASE(mean_test)
const auto mean = std::accumulate(scalars.begin(), scalars.end(), 0.0) / num_data;
std::vector<double> gold(num_elms, mean);
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(mean_integral_test)
......@@ -1110,7 +1110,7 @@ TEST_CASE(mean_integral_test)
const auto mean = std::accumulate(scalars.begin(), scalars.end(), 0) / num_data;
std::vector<int> gold(num_elms, mean);
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(mod_test)
......@@ -1137,7 +1137,7 @@ TEST_CASE(mod_test)
std::vector<int32_t> gold = {0, -2, 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2,
5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, 5};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(mod_test_different_types)
......@@ -1165,7 +1165,7 @@ TEST_CASE(mod_test_different_types)
std::vector<int32_t> gold = {0, -2, 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2,
5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, 5};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(mod_test_fmod)
......@@ -1194,7 +1194,7 @@ TEST_CASE(mod_test_fmod)
10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 1.6, 3.9, 5.2,
7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.2, 1.3, 3.1};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(mod_test_fmod_different_types)
......@@ -1224,7 +1224,7 @@ TEST_CASE(mod_test_fmod_different_types)
10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 1.6, 3.9, 5.2,
7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.2, 1.3, 3.1};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(nonzero_test)
......@@ -1243,7 +1243,7 @@ TEST_CASE(nonzero_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 0, 1, 0, 0, 1, 0, 0};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(resize_downsample_f_test)
......@@ -1264,7 +1264,7 @@ TEST_CASE(resize_downsample_f_test)
std::vector<float> gold = {0.0f, 3.0f};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(resize_upsample_linear_ac_test)
......@@ -1299,7 +1299,7 @@ TEST_CASE(resize_upsample_linear_ac_test)
11.0f / 3,
4};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(resize_upsample_linear_test)
......@@ -1320,7 +1320,7 @@ TEST_CASE(resize_upsample_linear_test)
std::vector<float> gold = {
1, 1.25, 1.75, 2, 1.5, 1.75, 2.25, 2.5, 2.5, 2.75, 3.25, 3.5, 3, 3.25, 3.75, 4};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(resize_upsample_pf_test)
......@@ -1341,7 +1341,7 @@ TEST_CASE(resize_upsample_pf_test)
std::vector<float> gold = {1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2,
3, 3, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(reversesequence_4D_verify_test)
......@@ -1362,7 +1362,7 @@ TEST_CASE(reversesequence_4D_verify_test)
std::vector<float> gold = {
8.0, 9.0, 10.0, 11.0, 4.0, 5.0, 6.0, 7.0, 0.0, 1.0, 2.0, 3.0, 12.0, 13.0, 14.0, 15.0};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(reversesequence_batch_verify_test)
......@@ -1383,7 +1383,7 @@ TEST_CASE(reversesequence_batch_verify_test)
std::vector<float> gold = {
0.0, 1.0, 2.0, 3.0, 5.0, 4.0, 6.0, 7.0, 10.0, 9.0, 8.0, 11.0, 15.0, 14.0, 13.0, 12.0};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(reversesequence_time_verify_test)
......@@ -1404,7 +1404,7 @@ TEST_CASE(reversesequence_time_verify_test)
std::vector<float> gold = {
3.0, 6.0, 9.0, 12.0, 2.0, 5.0, 8.0, 13.0, 1.0, 4.0, 10.0, 14.0, 0.0, 7.0, 11.0, 15.0};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(selu_test)
......@@ -1424,7 +1424,7 @@ TEST_CASE(selu_test)
std::vector<float> gold = {0.55, 1.05, 0, -0.10912, -0.149251, 6};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(size_verify_test)
......@@ -1458,7 +1458,7 @@ TEST_CASE(slice_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {2, 3};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(slice_5arg_test)
......@@ -1478,7 +1478,7 @@ TEST_CASE(slice_5arg_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {10, 11, 12, 13, 15, 16, 17, 18};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(slice_reverse_test)
......@@ -1498,7 +1498,7 @@ TEST_CASE(slice_reverse_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {14, 13, 12, 11, 19, 18, 17, 16};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(slice_step_test)
......@@ -1518,7 +1518,7 @@ TEST_CASE(slice_step_test)
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {14, 12};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(softplus_test)
......@@ -1539,7 +1539,7 @@ TEST_CASE(softplus_test)
std::transform(
data.begin(), data.end(), gold.begin(), [](auto x) { return std::log1p(std::exp(x)); });
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(softsign_test)
......@@ -1560,7 +1560,7 @@ TEST_CASE(softsign_test)
std::transform(
data.begin(), data.end(), gold.begin(), [](auto x) { return x / (1.0 + std::abs(x)); });
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(upsample_test)
......@@ -1579,7 +1579,7 @@ TEST_CASE(upsample_test)
std::vector<float> gold = {1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2,
3, 3, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(where_test)
......@@ -1621,7 +1621,7 @@ TEST_CASE(where_test)
2.0f,
1.0f,
2.0f};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
std::vector<float> gen_trilu_test(const migraphx::shape& s, const migraphx::program& p)
......@@ -1646,7 +1646,7 @@ TEST_CASE(trilu_test)
std::vector<float> gold = {1, 2, 3, 4, 0, 6, 7, 8, 0, 0, 11, 12};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(trilu_batch_diff_k_test)
......@@ -1657,7 +1657,7 @@ TEST_CASE(trilu_batch_diff_k_test)
std::vector<float> gold = {0, 0, 3, 0, 0, 0, 0, 0, 9, 0, 0, 0};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(trilu_lower_test)
......@@ -1668,7 +1668,7 @@ TEST_CASE(trilu_lower_test)
std::vector<float> gold = {0, 0, 0, 0, 5, 0, 0, 0, 9, 10, 0, 0};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(trilu_out_k_test)
......@@ -1679,7 +1679,7 @@ TEST_CASE(trilu_out_k_test)
std::vector<float> gold(12, 0);
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(trilu_row_one_test)
......@@ -1690,7 +1690,7 @@ TEST_CASE(trilu_row_one_test)
std::vector<float> gold = {0, 2, 3, 4};
EXPECT(migraphx::verify_range(result_vector, gold));
EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
// Entry point: hand off to the test framework, which runs every registered TEST_CASE.
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -453,37 +453,143 @@ TEST_CASE(contiguous_shape_singleton_dim)
expect_shape(output, migraphx::make_op("contiguous"), input);
}
TEST_CASE(deconvolution_shape)
TEST_CASE(convolution_backwards_1d)
{
    // 1-D transposed convolution: spatial length 1 with a 3-wide kernel,
    // unit stride/dilation and no padding, expands the spatial dim to 3.
    migraphx::shape data{migraphx::shape::float_type, {4, 4, 1}};
    migraphx::shape kernel{migraphx::shape::float_type, {4, 3, 3}};
    migraphx::shape expected{migraphx::shape::float_type, {4, 3, 3}};
    expect_shape(expected,
                 migraphx::make_op("convolution_backwards",
                                   {{"padding", {0}}, {"stride", {1}}, {"dilation", {1}}}),
                 data,
                 kernel);
}
TEST_CASE(convolution_backwards_2d)
{
    // 2-D transposed convolution with default attributes, plus the error path
    // where the weights argument is missing entirely.
    migraphx::shape data{migraphx::shape::float_type, {4, 4, 1, 1}};
    migraphx::shape kernel{migraphx::shape::float_type, {4, 3, 3, 3}};
    migraphx::shape expected{migraphx::shape::float_type, {4, 3, 3, 3}};
    expect_shape(expected, migraphx::make_op("convolution_backwards"), data, kernel);
    // a lone data input (no weights) must be rejected, with or without explicit attributes
    throws_shape(migraphx::make_op("convolution_backwards"), data);
    throws_shape(migraphx::make_op("convolution_backwards",
                                   {{"padding", {0}}, {"stride", {1}}, {"dilation", {1}}}),
                 data);
}
TEST_CASE(convolution_backwards_1padding)
{
migraphx::shape input{migraphx::shape::float_type, {4, 4, 1, 1}};
migraphx::shape weights{migraphx::shape::float_type, {4, 3, 3, 3}};
expect_shape(output, migraphx::make_op("deconvolution"), input, weights);
throws_shape(migraphx::make_op("deconvolution"), input);
throws_shape(
migraphx::make_op("deconvolution", {{"padding", {0}}, {"stride", {1}}, {"dilation", {1}}}),
input);
migraphx::shape output{migraphx::shape::float_type, {4, 3, 1, 1}};
expect_shape(output,
migraphx::make_op("convolution_backwards",
{{"padding", {1, 1}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input,
weights);
}
migraphx::shape input_1d{migraphx::shape::float_type, {4, 4, 1}};
migraphx::shape output_1d{migraphx::shape::float_type, {4, 3, 3}};
migraphx::shape weights_1d{migraphx::shape::float_type, {4, 3, 3}};
expect_shape(
output_1d,
migraphx::make_op("deconvolution", {{"padding", {0}}, {"stride", {1}}, {"dilation", {1}}}),
input_1d,
weights_1d);
TEST_CASE(convolution_backwards_2stride)
{
    // stride 2 upsamples each 4x4 spatial dim: (4 - 1) * 2 + 3 = 9
    migraphx::shape data{migraphx::shape::float_type, {4, 4, 4, 4}};
    migraphx::shape kernel{migraphx::shape::float_type, {4, 3, 3, 3}};
    migraphx::shape expected{migraphx::shape::float_type, {4, 3, 9, 9}};
    expect_shape(expected,
                 migraphx::make_op("convolution_backwards",
                                   {{"padding", {0, 0}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
                 data,
                 kernel);
}
TEST_CASE(convolution_backwards_2dilation)
{
    // dilation 2 widens the effective kernel to (3 - 1) * 2 + 1 = 5, so each
    // 4-wide spatial dim becomes (4 - 1) + 5 = 8
    migraphx::shape data{migraphx::shape::float_type, {4, 4, 4, 4}};
    migraphx::shape kernel{migraphx::shape::float_type, {4, 3, 3, 3}};
    migraphx::shape expected{migraphx::shape::float_type, {4, 3, 8, 8}};
    expect_shape(expected,
                 migraphx::make_op("convolution_backwards",
                                   {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {2, 2}}}),
                 data,
                 kernel);
}
TEST_CASE(convolution_backwards_3d)
{
migraphx::shape input_3d{migraphx::shape::float_type, {4, 4, 1, 1, 1}};
migraphx::shape output_3d{migraphx::shape::float_type, {4, 3, 3, 3, 3}};
migraphx::shape weights_3d{migraphx::shape::float_type, {4, 3, 3, 3, 3}};
expect_shape(
output_3d,
migraphx::make_op("deconvolution",
migraphx::make_op("convolution_backwards",
{{"padding", {0, 0, 0}}, {"stride", {1, 1, 1}}, {"dilation", {1, 1, 1}}}),
input_3d,
weights_3d);
}
TEST_CASE(convolution_backwards_channel_mismatch)
{
    // data has 4 channels but the weights' leading dim is 3; shape inference must throw
    migraphx::shape data{migraphx::shape::float_type, {4, 4, 1, 1}};
    migraphx::shape kernel{migraphx::shape::float_type, {3, 3, 3, 3}};
    throws_shape(migraphx::make_op("convolution_backwards"), data, kernel);
}
TEST_CASE(convolution_backwards_dyn_batch_2d)
{
    // dynamic batch {1,4} passes through unchanged; spatial dims grow 1 -> 3
    migraphx::shape data{migraphx::shape::float_type, {{1, 4}, {4, 4}, {1, 1}, {1, 1}}};
    migraphx::shape kernel{migraphx::shape::float_type, {4, 3, 3, 3}};
    migraphx::shape expected{migraphx::shape::float_type, {{1, 4}, {3, 3}, {3, 3}, {3, 3}}};
    expect_shape(expected, migraphx::make_op("convolution_backwards"), data, kernel);
}
TEST_CASE(convolution_backwards_dyn_img_2d)
{
    // dynamic spatial dims {1,5} map to {1-1+3, 5-1+3} = {3,7}
    migraphx::shape data{migraphx::shape::float_type, {{1, 1}, {4, 4}, {1, 5}, {1, 5}}};
    migraphx::shape kernel{migraphx::shape::float_type, {4, 3, 3, 3}};
    migraphx::shape expected{migraphx::shape::float_type, {{1, 1}, {3, 3}, {3, 7}, {3, 7}}};
    expect_shape(expected, migraphx::make_op("convolution_backwards"), data, kernel);
}
TEST_CASE(convolution_backwards_dyn_kernel_2d)
{
    // with a 1x1 static input, a dynamic kernel extent {2,6} carries straight
    // through to the output spatial dims
    migraphx::shape data{migraphx::shape::float_type, {1, 4, 1, 1}};
    migraphx::shape kernel{migraphx::shape::float_type, {{4, 4}, {3, 3}, {2, 6}, {2, 6}}};
    migraphx::shape expected{migraphx::shape::float_type, {{1, 1}, {3, 3}, {2, 6}, {2, 6}}};
    expect_shape(expected, migraphx::make_op("convolution_backwards"), data, kernel);
}
TEST_CASE(dimensions_of0)
{
    // full range [0, 4): all four dims returned as an int64 vector of length 4
    migraphx::shape data{migraphx::shape::float_type, {4, 3, 2, 1}};
    migraphx::shape expected{migraphx::shape::int64_type, {4}};
    expect_shape(expected, migraphx::make_op("dimensions_of", {{"end", 4}}), data);
}
TEST_CASE(dimensions_of1)
{
    // sub-range [1, 3): two dims selected, so the output has length 2
    migraphx::shape data{migraphx::shape::float_type, {4, 3, 2, 1}};
    migraphx::shape expected{migraphx::shape::int64_type, {2}};
    expect_shape(expected, migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), data);
}
TEST_CASE(dimensions_of2)
{
    // same sub-range [1, 3) on a fully dynamic input: output shape is still
    // a static int64 vector of length 2
    migraphx::shape data{migraphx::shape::float_type,
                         {{1, 4, {2}}, {2, 4}, {2, 4}, {1, 6, {2}}}};
    migraphx::shape expected{migraphx::shape::int64_type, {2}};
    expect_shape(expected, migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), data);
}
TEST_CASE(dimensions_of_error0)
{
    // start == end selects nothing (and lies past rank 2); must be rejected
    migraphx::shape data{migraphx::shape::float_type, {{1, 4, {2}}, {2, 4}}};
    throws_shape(migraphx::make_op("dimensions_of", {{"start", 3}, {"end", 3}}), data);
}
TEST_CASE(dimensions_of_error1)
{
    // end before start is an invalid range; must be rejected
    migraphx::shape data{migraphx::shape::float_type, {{1, 4, {2}}, {2, 4}}};
    throws_shape(migraphx::make_op("dimensions_of", {{"start", 3}, {"end", 0}}), data);
}
TEST_CASE(dot_ndim_error0)
{
migraphx::shape s_m1{migraphx::shape::float_type, {5}};
......@@ -1134,7 +1240,7 @@ TEST_CASE(inconsistent_attr_shape)
{{"padding", {1, 1}}, {"stride", {2}}, {"dilation", {3, 3, 3}}}),
input,
weights);
throws_shape(migraphx::make_op("deconvolution",
throws_shape(migraphx::make_op("convolution_backwards",
{{"padding", {1, 1}}, {"stride", {2}}, {"dilation", {3, 3, 3}}}),
input,
weights);
......@@ -2104,6 +2210,19 @@ TEST_CASE(prefix_scan_sum_dyn)
}
}
TEST_CASE(prefix_scan_sum_dyn_2d)
{
    // prefix_scan_sum is shape-preserving, dynamic dimensions included
    std::vector<migraphx::shape::dynamic_dimension> dyn_dims{{5, 8}, {3, 7}};
    migraphx::shape s{migraphx::shape::float_type, dyn_dims};
    expect_shape(
        s,
        migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", 0}, {"reverse", 0}}),
        s);
}
TEST_CASE(quant_convolution_shape)
{
migraphx::shape output{migraphx::shape::int32_type, {4, 4, 1, 1}};
......
......@@ -40,12 +40,12 @@ TEST_CASE(const_add)
auto one = m1.add_literal(1);
auto two = m1.add_literal(2);
auto sum = m1.add_instruction(migraphx::make_op("add"), one, two);
m1.add_instruction(pass_op{}, sum);
m1.add_instruction(non_const_pass_op{}, sum);
run_pass(m1);
migraphx::module m2;
auto total = m2.add_literal(3);
m2.add_instruction(pass_op{}, total);
m2.add_instruction(non_const_pass_op{}, total);
EXPECT(m1 == m2);
}
......@@ -55,12 +55,12 @@ TEST_CASE(const_add_parameter)
auto one = m1.add_parameter("one", {migraphx::shape::int32_type, {1}});
auto two = m1.add_literal(2);
auto sum = m1.add_instruction(migraphx::make_op("add"), one, two);
m1.add_instruction(pass_op{}, sum);
m1.add_instruction(non_const_pass_op{}, sum);
run_pass(m1);
migraphx::module m2;
auto total = m2.add_literal(3);
m2.add_instruction(pass_op{}, total);
m2.add_instruction(non_const_pass_op{}, total);
EXPECT(m1 != m2);
}
......@@ -71,12 +71,12 @@ TEST_CASE(const_multiadd)
auto two = m1.add_literal(2);
auto sum1 = m1.add_instruction(migraphx::make_op("add"), one, two);
auto sum2 = m1.add_instruction(migraphx::make_op("add"), sum1, two);
m1.add_instruction(pass_op{}, sum2);
m1.add_instruction(non_const_pass_op{}, sum2);
run_pass(m1);
migraphx::module m2;
auto total = m2.add_literal(5);
m2.add_instruction(pass_op{}, total);
m2.add_instruction(non_const_pass_op{}, total);
EXPECT(m1 == m2);
}
......@@ -88,12 +88,12 @@ TEST_CASE(const_add_mul)
auto mul = m1.add_instruction(migraphx::make_op("mul"), two, two);
auto sum1 = m1.add_instruction(migraphx::make_op("add"), one, mul);
auto sum2 = m1.add_instruction(migraphx::make_op("add"), sum1, two);
m1.add_instruction(pass_op{}, sum2);
m1.add_instruction(non_const_pass_op{}, sum2);
run_pass(m1);
migraphx::module m2;
auto total = m2.add_literal(7);
m2.add_instruction(pass_op{}, total);
m2.add_instruction(non_const_pass_op{}, total);
EXPECT(m1 == m2);
}
......@@ -105,13 +105,13 @@ TEST_CASE(const_add_scalar)
auto two = m1.add_instruction(migraphx::make_op("scalar", {{"scalar_bcst_dims", {2, 2}}}),
m1.add_literal(2));
auto sum = m1.add_instruction(migraphx::make_op("add"), one, two);
m1.add_instruction(pass_op{}, sum);
m1.add_instruction(non_const_pass_op{}, sum);
run_pass(m1);
migraphx::module m2;
auto total =
m2.add_literal(migraphx::literal{{migraphx::shape::int32_type, {2, 2}}, {3, 3, 3, 3}});
m2.add_instruction(pass_op{}, total);
m2.add_instruction(non_const_pass_op{}, total);
EXPECT(m1 == m2);
}
......@@ -121,7 +121,7 @@ TEST_CASE(const_scalar)
{
auto one = m1.add_instruction(migraphx::make_op("scalar", {{"scalar_bcst_dims", {2, 2}}}),
m1.add_literal(1));
m1.add_instruction(pass_op{}, one);
m1.add_instruction(non_const_pass_op{}, one);
}
run_pass(m1);
......@@ -129,7 +129,7 @@ TEST_CASE(const_scalar)
{
auto one = m2.add_instruction(migraphx::make_op("scalar", {{"scalar_bcst_dims", {2, 2}}}),
m2.add_literal(1));
m2.add_instruction(pass_op{}, one);
m2.add_instruction(non_const_pass_op{}, one);
}
EXPECT(m1 == m2);
}
......
......@@ -51,6 +51,7 @@ add_py_test(save_load test_save_load.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(op test_op.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(shape test_shape.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(module_construct test_module_construct.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(literal test_literal.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
if(MIGRAPHX_ENABLE_GPU)
add_py_test(gpu_offload test_gpu_offload.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(gpu test_gpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment