Commit 94cf1bf3 authored by Khalique Ahmed's avatar Khalique Ahmed
Browse files

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into nhwc_workaround

parents c0547e9a 49280e51
This diff is collapsed.
3be6eb53c8b359703cb645ed2cb1cdf106924b7c
d3295f4329d744fe1f8419e1220e123807282b99
......@@ -6165,6 +6165,101 @@ def shape_test():
return ([node], [x], [y])
@onnx_test()
def shape_dyn_test0():
    """Shape op on a dynamically-shaped input, no start/end attributes."""
    dyn_input = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                              [None, 4, None, None])
    out = helper.make_tensor_value_info('y', TensorProto.INT64, [4])
    shape_node = onnx.helper.make_node('Shape', inputs=['x'], outputs=['y'])
    return ([shape_node], [dyn_input], [out])
@onnx_test()
def shape_dyn_test1():
    """Shape with start=2: only the trailing two dimensions are reported."""
    dyn_input = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                              [None, 4, None, None])
    out = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=['x'],
                                       outputs=['y'],
                                       start=2)
    return ([shape_node], [dyn_input], [out])
@onnx_test()
def shape_dyn_test2():
    """Shape with a negative start (-2), counted from the last axis."""
    dyn_input = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                              [None, 4, None, None])
    out = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=['x'],
                                       outputs=['y'],
                                       start=-2)
    return ([shape_node], [dyn_input], [out])
@onnx_test()
def shape_dyn_test3():
    """Shape with both start=1 and end=2: a single-axis slice of the shape."""
    dyn_input = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                              [None, 4, None, None])
    out = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=['x'],
                                       outputs=['y'],
                                       start=1,
                                       end=2)
    return ([shape_node], [dyn_input], [out])
@onnx_test()
def shape_end_oob_test():
    """Shape with end=5 past the rank; the parser is expected to clamp it."""
    dyn_input = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                              [None, 4, None, None])
    out = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=['x'],
                                       outputs=['y'],
                                       end=5)
    return ([shape_node], [dyn_input], [out])
@onnx_test()
def shape_start_oob_test():
    """Shape with start=-6 below -rank; the parser is expected to clamp it."""
    dyn_input = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                              [None, 4, None, None])
    out = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=['x'],
                                       outputs=['y'],
                                       start=-6)
    return ([shape_node], [dyn_input], [out])
@onnx_test()
def shape_end_less_start_error():
    """Invalid Shape: end (1) before start (3) — parsing should fail."""
    dyn_input = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                              [None, 4, None, None])
    out = helper.make_tensor_value_info('y', TensorProto.INT64, [2])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=['x'],
                                       outputs=['y'],
                                       start=3,
                                       end=1)
    return ([shape_node], [dyn_input], [out])
@onnx_test()
def shape_gather_test():
values = np.array([1])
......
......@@ -440,14 +440,13 @@ TEST_CASE(batch_norm_flat_test)
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {1}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {1}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, scale});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), {var_eps});
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, bias});
auto prog = optimize_onnx("batch_norm_flat_test.onnx");
......@@ -465,14 +464,13 @@ TEST_CASE(batch_norm_rank_2_test)
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {5}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {5}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, scale});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), {var_eps});
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, bias});
auto prog = optimize_onnx("batch_norm_rank_2_test.onnx");
......@@ -490,7 +488,6 @@ TEST_CASE(batch_norm_1d_test)
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {3}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {3}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-5f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), scale);
......@@ -498,11 +495,11 @@ TEST_CASE(batch_norm_1d_test)
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_onnx("batch_norm_1d_test.onnx");
......@@ -520,7 +517,6 @@ TEST_CASE(batch_norm_2d_test)
auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {3}});
auto var = mm->add_parameter("variance", {migraphx::shape::float_type, {3}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
......@@ -528,11 +524,11 @@ TEST_CASE(batch_norm_2d_test)
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_onnx("batch_norm_2d_test.onnx");
......@@ -550,7 +546,6 @@ TEST_CASE(batch_norm_3d_test)
auto mean = mm->add_parameter("mean", {migraphx::shape::half_type, {2}});
auto var = mm->add_parameter("variance", {migraphx::shape::half_type, {2}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-6f}});
auto usq_scale =
......@@ -561,12 +556,13 @@ TEST_CASE(batch_norm_3d_test)
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_onnx("batch_norm_3d_test.onnx");
EXPECT(p == prog);
......@@ -908,7 +904,6 @@ TEST_CASE(constant_test)
TEST_CASE(constant_fill_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
......@@ -1105,7 +1100,6 @@ TEST_CASE(conv_bn_relu_maxpool_test)
auto p5 = mm->add_parameter("5", {migraphx::shape::float_type, {1}});
auto p6 = mm->add_parameter("6", {migraphx::shape::float_type, {1}});
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
uint64_t axis = 1;
......@@ -1120,25 +1114,12 @@ TEST_CASE(conv_bn_relu_maxpool_test)
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p5);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p6);
auto mb_mean = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_mean);
auto numer = mm->add_instruction(migraphx::make_op("sub"), l5, mb_mean);
auto mb_eps =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1}}}), eps);
auto var_eps = mm->add_instruction(migraphx::make_op("add"), usq_var, mb_eps);
auto mb_rt =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1}}}), rt);
auto denom = mm->add_instruction(migraphx::make_op("pow"), var_eps, mb_rt);
auto mb_denom = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), denom);
auto div0 = mm->add_instruction(migraphx::make_op("div"), numer, mb_denom);
auto mb_scale = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_scale);
auto r0 = mm->add_instruction(migraphx::make_op("mul"), div0, mb_scale);
auto mb_bias = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_bias);
auto l6 = mm->add_instruction(migraphx::make_op("add"), r0, mb_bias);
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {l5, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
auto l6 = add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto l7 = mm->add_instruction(migraphx::make_op("relu"), l6);
mm->add_instruction(migraphx::make_op("pooling",
......@@ -6079,6 +6060,118 @@ TEST_CASE(shape_test)
EXPECT(p == prog);
}
// ONNX Shape on a dynamic input with no start/end lowers to a single
// dimensions_of instruction spanning all 4 axes.
// Fix: removed the local `s_shape`, which was declared but never used.
TEST_CASE(shape_dyn_test0)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    // Each dynamic dimension is {min, max, optimals}.
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0  = mm->add_parameter("x", s);
    auto ret = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 4}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog                       = parse_onnx("shape_dyn_test0.onnx", options);
    EXPECT(p == prog);
}
// ONNX Shape with start=2 lowers to dimensions_of over axes [2, 4).
// Fix: removed the local `s_shape`, which was declared but never used.
TEST_CASE(shape_dyn_test1)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0 = mm->add_parameter("x", s);
    auto ret =
        mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 2}, {"end", 4}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog                       = parse_onnx("shape_dyn_test1.onnx", options);
    EXPECT(p == prog);
}
// ONNX Shape with start=-2 normalizes to start=2 on a rank-4 input, so the
// lowered program is identical to the start=2 case.
// Fix: removed the local `s_shape`, which was declared but never used.
TEST_CASE(shape_dyn_test2)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0 = mm->add_parameter("x", s);
    auto ret =
        mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 2}, {"end", 4}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog                       = parse_onnx("shape_dyn_test2.onnx", options);
    EXPECT(p == prog);
}
// ONNX Shape with start=1, end=2 lowers to dimensions_of over the single
// axis [1, 2).
// Fix: removed the local `s_shape`, which was declared but never used.
TEST_CASE(shape_dyn_test3)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0 = mm->add_parameter("x", s);
    auto ret =
        mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 2}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog                       = parse_onnx("shape_dyn_test3.onnx", options);
    EXPECT(p == prog);
}
// ONNX Shape with end=5 on a rank-4 input: the parser clamps end to the
// rank, so the result covers all 4 axes.
// Fix: removed the local `s_shape`, which was declared but never used.
TEST_CASE(shape_end_oob_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0  = mm->add_parameter("x", s);
    auto ret = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 4}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog                       = migraphx::parse_onnx("shape_end_oob_test.onnx", options);
    EXPECT(p == prog);
}
// ONNX Shape with start=-6 on a rank-4 input: the parser clamps start to 0,
// so the result covers all 4 axes.
// Fix: removed the local `s_shape`, which was declared but never used.
TEST_CASE(shape_start_oob_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}}};
    auto p0  = mm->add_parameter("x", s);
    auto ret = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 4}}), p0);
    mm->add_return({ret});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    auto prog                       = migraphx::parse_onnx("shape_start_oob_test.onnx", options);
    EXPECT(p == prog);
}
// ONNX Shape with end < start is invalid; parsing must throw.
TEST_CASE(shape_end_less_start_error)
{
    migraphx::onnx_options opts;
    opts.map_dyn_input_dims["x"] = {{1, 4, {1, 4}}, {4, 4}, {2, 4}, {2, 4}};
    EXPECT(test::throws([&] { migraphx::parse_onnx("shape_end_less_start_error.onnx", opts); }));
}
TEST_CASE(shape_gather_test)
{
migraphx::program p;
......@@ -7150,7 +7243,8 @@ TEST_CASE(variable_batch_user_input_test6)
TEST_CASE(variable_batch_user_input_test7)
{
// if entry in map_dyn_input_dims is all fixed dynamic_dimensions, convert it to a static shape
// if entry in map_dyn_input_dims is all fixed dynamic_dimensions, convert it to a static
// shape
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 16, 16}});
......
No preview for this file type
......@@ -557,6 +557,39 @@ TEST_CASE(convolution_backwards_dyn_kernel_2d)
expect_shape(output, migraphx::make_op("convolution_backwards"), input, weights);
}
// dimensions_of over all axes of a static rank-4 input yields an int64
// vector of length 4.
TEST_CASE(dimensions_of0)
{
    migraphx::shape in_shape{migraphx::shape::float_type, {4, 3, 2, 1}};
    migraphx::shape expected{migraphx::shape::int64_type, {4}};
    expect_shape(expected, migraphx::make_op("dimensions_of", {{"end", 4}}), in_shape);
}
// dimensions_of over axes [1, 3) of a static input yields two entries.
TEST_CASE(dimensions_of1)
{
    migraphx::shape in_shape{migraphx::shape::float_type, {4, 3, 2, 1}};
    migraphx::shape expected{migraphx::shape::int64_type, {2}};
    expect_shape(expected, migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), in_shape);
}
// dimensions_of on a dynamic input still produces a static int64 output
// whose length equals end - start.
TEST_CASE(dimensions_of2)
{
    migraphx::shape in_shape{migraphx::shape::float_type,
                             {{1, 4, {2}}, {2, 4}, {2, 4}, {1, 6, {2}}}};
    migraphx::shape expected{migraphx::shape::int64_type, {2}};
    expect_shape(expected, migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), in_shape);
}
// start == end would give an empty output; shape computation must throw.
TEST_CASE(dimensions_of_error0)
{
    migraphx::shape in_shape{migraphx::shape::float_type, {{1, 4, {2}}, {2, 4}}};
    throws_shape(migraphx::make_op("dimensions_of", {{"start", 3}, {"end", 3}}), in_shape);
}
// end < start is invalid; shape computation must throw.
TEST_CASE(dimensions_of_error1)
{
    migraphx::shape in_shape{migraphx::shape::float_type, {{1, 4, {2}}, {2, 4}}};
    throws_shape(migraphx::make_op("dimensions_of", {{"start", 3}, {"end", 0}}), in_shape);
}
TEST_CASE(dot_ndim_error0)
{
migraphx::shape s_m1{migraphx::shape::float_type, {5}};
......@@ -2157,6 +2190,32 @@ TEST_CASE(prefix_scan_sum)
}
}
// prefix_scan_sum on a 1-D dynamic shape is shape-preserving.
TEST_CASE(prefix_scan_sum_dyn)
{
    std::vector<migraphx::shape::dynamic_dimension> dyn_dims{{5, 8}};
    migraphx::shape s{migraphx::shape::float_type, dyn_dims};
    expect_shape(s,
                 migraphx::make_op("prefix_scan_sum",
                                   {{"axis", 0}, {"exclusive", 0}, {"reverse", 0}}),
                 s);
}
// prefix_scan_sum along axis 1 of a 2-D dynamic shape is shape-preserving.
TEST_CASE(prefix_scan_sum_dyn_2d)
{
    std::vector<migraphx::shape::dynamic_dimension> dyn_dims{{5, 8}, {3, 7}};
    migraphx::shape s{migraphx::shape::float_type, dyn_dims};
    expect_shape(s,
                 migraphx::make_op("prefix_scan_sum",
                                   {{"axis", 1}, {"exclusive", 0}, {"reverse", 0}}),
                 s);
}
TEST_CASE(quant_convolution_shape)
{
migraphx::shape output{migraphx::shape::int32_type, {4, 4, 1, 1}};
......
......@@ -33,8 +33,8 @@ def test_conv_relu():
p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
print(p)
print("Compiling ...")
# set offload_copy, fast_match and exhaustive_tune to true
p.compile(migraphx.get_target("gpu"), True, True, True)
# set offload_copy, fast_match to true
p.compile(migraphx.get_target("gpu"), True, True)
print(p)
params = {}
......
......@@ -379,10 +379,7 @@ TEST_CASE(fp16_subgraph)
auto create_fp16_program = [] {
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sd{migraphx::shape::float_type, {1}};
auto l1 = mm->add_literal(migraphx::literal(sd, {1}));
auto l2 = mm->add_literal(migraphx::literal(sd, {2}));
auto l3 = mm->add_literal(migraphx::literal(sd, {3}));
migraphx::shape sd{migraphx::shape::half_type, {1}};
migraphx::shape sx{migraphx::shape::float_type, {1, 4}};
migraphx::shape sy{migraphx::shape::float_type, {3, 4}};
migraphx::shape sc{migraphx::shape::bool_type};
......@@ -390,8 +387,8 @@ TEST_CASE(fp16_subgraph)
auto x = mm->add_parameter("x", sx);
auto y = mm->add_parameter("y", sy);
auto* then_mod = p.create_module("If_6_if");
auto hl1 = then_mod->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}), l1);
auto hl2 = then_mod->add_literal(migraphx::literal(sd, {2}));
auto hl1 = then_mod->add_literal(migraphx::literal(sd, {1}));
auto mhl1 = then_mod->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 4}}}), hl1);
auto hx = then_mod->add_instruction(
......@@ -399,8 +396,6 @@ TEST_CASE(fp16_subgraph)
auto ad = then_mod->add_instruction(migraphx::make_op("add"), hx, mhl1);
auto fad = then_mod->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), ad);
auto hl2 = then_mod->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}), l2);
auto mhl2 = then_mod->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {3, 4}}}), hl2);
auto hy1 = then_mod->add_instruction(
......@@ -411,8 +406,7 @@ TEST_CASE(fp16_subgraph)
then_mod->add_return({fad, fmu, mu});
auto* else_mod = p.create_module("If_6_else");
auto hl3 = else_mod->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}), l3);
auto hl3 = else_mod->add_literal(migraphx::literal(sd, {3}));
auto mhl3 = else_mod->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 4}}}), hl3);
auto hx2 = else_mod->add_instruction(
......
......@@ -1934,6 +1934,42 @@ TEST_CASE(cosh_dyn_test)
EXPECT(migraphx::verify::verify_range(results_vector, gold));
}
// Converting floats larger than half's max down to half must saturate at
// the half max rather than overflow.
TEST_CASE(convert_downcast_overflow_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape in_shape{migraphx::shape::float_type, {2, 2}};
    std::vector<float> in_data(4, 2 * std::numeric_limits<migraphx::half>::max());
    auto lit = mm->add_literal(migraphx::literal{in_shape, in_data});
    mm->add_instruction(migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}),
                        lit);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<migraphx::half> out(4);
    result.visit([&](auto output) { out.assign(output.begin(), output.end()); });
    EXPECT(std::all_of(out.begin(), out.end(), [](const auto& v) {
        return v == std::numeric_limits<migraphx::half>::max();
    }));
}
// Converting floats below half's lowest down to half must saturate at the
// half lowest rather than underflow.
TEST_CASE(convert_downcast_underflow_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape in_shape{migraphx::shape::float_type, {2, 2}};
    std::vector<float> in_data(4, 2 * std::numeric_limits<migraphx::half>::lowest());
    auto lit = mm->add_literal(migraphx::literal{in_shape, in_data});
    mm->add_instruction(migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}),
                        lit);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<migraphx::half> out(4);
    result.visit([&](auto output) { out.assign(output.begin(), output.end()); });
    EXPECT(std::all_of(out.begin(), out.end(), [](const auto& v) {
        return v == std::numeric_limits<migraphx::half>::lowest();
    }));
}
TEST_CASE(convert_nan_upcast_test)
{
migraphx::program p;
......@@ -2321,6 +2357,46 @@ TEST_CASE(dequantizelinear)
}
}
// Evaluate dimensions_of on the ref target: with a dynamic input fixed at
// runtime to {2, 3, 4}, the op returns those runtime dimensions.
TEST_CASE(dimensions_of_test0)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape dyn_shape{migraphx::shape::float_type, {{1, 4, {2, 4}}, {3, 3}, {4, 4}}};
    auto x_param = mm->add_parameter("x", dyn_shape);
    mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 3}}), x_param);
    p.compile(migraphx::make_target("ref"));

    std::vector<float> x_data(24, 1.0);
    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {2, 3, 4}};
    migraphx::parameter_map params;
    params["x"] = migraphx::argument(input_fixed_shape, x_data.data());
    auto result = p.eval(params).back();
    std::vector<int64_t> out(3);
    result.visit([&](auto output) { out.assign(output.begin(), output.end()); });
    std::vector<int64_t> gold = {2, 3, 4};
    EXPECT(migraphx::verify::verify_range(out, gold));
}
// Evaluate dimensions_of with start=2, end=4 on the ref target: only the
// last two runtime dimensions ({4, 4}) are returned.
TEST_CASE(dimensions_of_test1)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape dyn_shape{migraphx::shape::float_type,
                              {{1, 4, {1, 4}}, {3, 3}, {3, 8}, {3, 8}}};
    auto x_param = mm->add_parameter("x", dyn_shape);
    mm->add_instruction(migraphx::make_op("dimensions_of", {{"start", 2}, {"end", 4}}), x_param);
    p.compile(migraphx::make_target("ref"));

    std::vector<float> x_data(48, 1.0);
    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 3, 4, 4}};
    migraphx::parameter_map params;
    params["x"] = migraphx::argument(input_fixed_shape, x_data.data());
    auto result = p.eval(params).back();
    std::vector<int64_t> out(2);
    result.visit([&](auto output) { out.assign(output.begin(), output.end()); });
    std::vector<int64_t> gold = {4, 4};
    EXPECT(migraphx::verify::verify_range(out, gold));
}
TEST_CASE(div_test)
{
migraphx::program p;
......@@ -5810,6 +5886,29 @@ TEST_CASE(prefix_scan_sum_1d)
EXPECT(results_vector == gold);
}
// Evaluate prefix_scan_sum on a 1-D dynamic input fixed at runtime to 6
// elements: the output is the running (inclusive) sum.
TEST_CASE(prefix_scan_sum_dyn_1d)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<migraphx::shape::dynamic_dimension> dyn_dims{{5, 8}};
    migraphx::shape dyn_shape{migraphx::shape::float_type, dyn_dims};
    auto x_param = mm->add_parameter("X", dyn_shape);
    mm->add_instruction(migraphx::make_op("prefix_scan_sum", {{"axis", 0}, {"exclusive", false}}),
                        x_param);
    p.compile(migraphx::make_target("ref"));

    std::vector<float> in_data = {1, 2, 3, 4, 5, 6};
    migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {6}};
    migraphx::parameter_map params0;
    params0["X"] = migraphx::argument(input_fixed_shape0, in_data.data());
    auto result = p.eval(params0).back();
    std::vector<float> out;
    result.visit([&](auto output) { out.assign(output.begin(), output.end()); });
    std::vector<float> gold{1.0, 3.0, 6.0, 10.0, 15.0, 21.0};
    EXPECT(out == gold);
}
TEST_CASE(prefix_scan_sum_2d)
{
{
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -228,6 +228,15 @@ TEST_CASE(test_shape_dynamic_errors)
EXPECT(test::throws([&] { s.index(std::vector<std::size_t>{0, 1}); }));
EXPECT(test::throws([&] { s.with_lens({3, 5}); }));
EXPECT(test::throws([&] { s.with_lens(shape::float_type, {3, 5}); }));
EXPECT(test::throws([&] { s.lens(); }));
EXPECT(test::throws([&] { s.strides(); }));
}
TEST_CASE(test_shape_static_dyn_dim_error)
{
using migraphx::shape;
migraphx::shape s{shape::float_type, {2, 3, 4}};
EXPECT(test::throws([&] { s.dyn_dims(); }));
}
TEST_CASE(test_shape_dynamic_serialize)
......
......@@ -196,7 +196,6 @@ TEST_CASE(batchnorm_test)
std::vector<float> scale_data(32, 1.0);
auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-4f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
......@@ -204,11 +203,11 @@ TEST_CASE(batchnorm_test)
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_tf("batchnorm_test.pb", true);
......@@ -227,7 +226,6 @@ TEST_CASE(batchnorm_half_test)
std::vector<float> scale_data(32, 1.0);
auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-4f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
......@@ -235,11 +233,11 @@ TEST_CASE(batchnorm_half_test)
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_tf("batchnorm_half_test.pb", true);
......@@ -258,7 +256,6 @@ TEST_CASE(batchnormv3_test)
std::vector<float> scale_data(32, 1.0);
auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
......@@ -266,11 +263,11 @@ TEST_CASE(batchnormv3_test)
auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto x_sub_mean = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto rsqrt = mm->add_instruction(migraphx::make_op("rsqrt"), var_eps);
auto mul0 = add_common_op(*mm, migraphx::make_op("mul"), {usq_scale, rsqrt});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {x_sub_mean, mul0});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto prog = optimize_tf("batchnormv3_test.pb", true);
......
......@@ -88,10 +88,31 @@ inline void compile_check(migraphx::program& p,
auto num = shapes.size();
for(std::size_t i = 0; i < num; ++i)
{
if(p.get_output_shapes()[i].lens() != shapes[i].lens())
auto output_shape = p.get_output_shapes()[i];
if(output_shape.dynamic() and shapes[i].dynamic())
{
if(output_shape.dyn_dims() != shapes[i].dyn_dims())
{
std::cout << ss.str() << std::endl;
throw std::runtime_error("Compiling program with " + name +
" alters its dynamic output dimensions");
}
}
else if(not(output_shape.dynamic() or shapes[i].dynamic()))
{
if(output_shape.lens() != shapes[i].lens())
{
std::cout << ss.str() << std::endl;
throw std::runtime_error("Compiling program with " + name +
" alters its static output dimensions");
}
}
else
{
std::cout << ss.str() << std::endl;
throw std::runtime_error("Compiling program with " + name + " alters its shape");
throw std::runtime_error(
"Compiling program with " + name +
" alters its output dimensions (static shape vs dynamic shape)");
}
}
if(t.name() != "ref")
......
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import os, shutil, argparse, subprocess

# Default install location of the ROCm LLVM clang-format binaries;
# overridable via the `path` argument of clang_format().
CLANG_FORMAT_PATH = '/opt/rocm/llvm/bin'
def run(cmd, **kwargs):
    """Echo *cmd*, then execute it through the shell, raising on failure."""
    print(cmd)
    subprocess.run(cmd, check=True, shell=True, **kwargs)
def eval(cmd, **kwargs):
    """Run *cmd* through the shell and return its stripped stdout.

    NOTE: intentionally shadows the ``eval`` builtin; existing callers in
    this script rely on the name.
    """
    completed = subprocess.run(cmd,
                               shell=True,
                               check=True,
                               capture_output=True,
                               **kwargs)
    return completed.stdout.decode('utf-8').strip()
def get_top():
    """Absolute path of the git repository's top-level directory."""
    return eval("git rev-parse --show-toplevel")
def get_head():
    """Name of the currently checked-out git branch (abbreviated HEAD)."""
    return eval("git rev-parse --abbrev-ref HEAD")
def get_merge_base(branch):
    """Commit at which *branch* and the current HEAD diverged."""
    current = get_head()
    return eval(f"git merge-base {branch} {current}")
def clang_format(against, apply=False, path=CLANG_FORMAT_PATH):
    """Run git-clang-format over changes since the merge base with *against*.

    With apply=False only a diff is printed; with apply=True files are
    rewritten in place. Silently skips when the binaries are not installed.
    """
    merge_base = get_merge_base(against)
    clang_format_bin = os.path.join(path, 'clang-format')
    if not os.path.exists(clang_format_bin):
        print(f"{clang_format_bin} not installed. Skipping format.")
        return
    git_clang_format_bin = os.path.join(path, 'git-clang-format')
    if not os.path.exists(git_clang_format_bin):
        print(f"{git_clang_format_bin} not installed. Skipping format.")
        return
    mode = "" if apply else "--diff"
    run(f"{git_clang_format_bin} --binary {clang_format_bin} {mode} {merge_base}")
def get_files_changed(against, ext=('.py', )):
    """Yield staged files (paths relative to the repo top) matching *ext*.

    *ext* may be a single suffix or a tuple of suffixes, as accepted by
    ``str.endswith``. Fix: the old default ``('py')`` was just the string
    'py' (parentheses without a comma do not make a tuple), so any filename
    merely ending in the letters "py" — e.g. ``numpy`` — matched; the
    default is now the real tuple ``('.py',)``.
    """
    files = eval(f"git diff-index --cached --name-only {against}",
                 cwd=get_top()).splitlines()
    return (f for f in files if f.endswith(ext))
def yapf_format(against, apply=False):
    """Format changed python files with yapf (diff-only unless *apply*).

    Skips silently when yapf is not on PATH.
    """
    if not shutil.which('yapf'):
        print("yapf not installed. Skipping format.")
        return
    mode = "--in-place" if apply else "--diff"
    changed = ' '.join(get_files_changed(against))
    if not changed:
        print("No modified python files to format")
        return
    run(f"yapf {mode} -p {changed}")
def main():
    """CLI entry point: clang-format C++ changes, then yapf-format python.

    On a subprocess failure, prints the child's captured output (and the
    failing command unless --quiet) before re-raising.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('against', default='develop', nargs='?')
    parser.add_argument('-i', '--in-place', action='store_true')
    parser.add_argument('-q', '--quiet', action='store_true')
    args = parser.parse_args()
    try:
        clang_format(args.against, apply=args.in_place)
        yapf_format(args.against, apply=args.in_place)
    except subprocess.CalledProcessError as ex:
        # Surface whatever the failing child wrote, stdout first.
        for captured in (ex.stdout, ex.stderr):
            if captured:
                print(captured.decode('utf-8'))
        if not args.quiet:
            print(f"Command '{ex.cmd}' returned {ex.returncode}")
        raise
if __name__ == "__main__":
main()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment