Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
2ab23275
Commit
2ab23275
authored
Jun 16, 2023
by
Alan Turner
Browse files
Merge remote-tracking branch 'origin/develop' into ck-integration-tuning
parents
2c3563dd
013d4829
Changes
25
Show whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
282 additions
and
26 deletions
+282
-26
test/onnx/instance_norm_dyn_batch_half_test.onnx
test/onnx/instance_norm_dyn_batch_half_test.onnx
+0
-0
test/onnx/instance_norm_dyn_batch_test.onnx
test/onnx/instance_norm_dyn_batch_test.onnx
+0
-0
test/onnx/onnx_test.cpp
test/onnx/onnx_test.cpp
+111
-26
test/onnx/verify_onnx.cpp
test/onnx/verify_onnx.cpp
+42
-0
test/op_shape_test.cpp
test/op_shape_test.cpp
+129
-0
No files found.
test/onnx/instance_norm_dyn_batch_half_test.onnx
0 → 100644
View file @
2ab23275
File added
test/onnx/instance_norm_dyn_batch_test.onnx
0 → 100644
View file @
2ab23275
File added
test/onnx/onnx_test.cpp
View file @
2ab23275
...
...
@@ -3174,28 +3174,64 @@ TEST_CASE(instance_norm_test)
auto bias = mm->add_parameter("2", s2);
auto mean = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), x);
auto
mean_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}})
, mean);
auto l0 = mm->add_instruction(migraphx::make_op("sub"), x, mean_bcast);
auto
l1
= mm->add_instruction(migraphx::make_op("
sqdiff"), x, mean_bcast
);
auto variance = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), l1);
auto
l1 = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto l0 = add_common_op(*mm, migraphx::make_op("sqdiff"), {x
, mean
}
);
auto
variance
= mm->add_instruction(migraphx::make_op("
reduce_mean", {{"axes", {2, 3}}}), l0
);
auto epsilon_literal =
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type}, {1e-5}});
auto epsilon_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", dims}}), epsilon_literal);
auto variance_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), variance);
auto l2 = mm->add_instruction(migraphx::make_op("add"), variance_bcast, epsilon_bcast);
auto l2 = add_common_op(*mm, migraphx::make_op("add"), {variance, epsilon_literal});
auto l3 = mm->add_instruction(migraphx::make_op("rsqrt"), l2);
auto l4 = mm->add_instruction(migraphx::make_op("mul"), l0, l3);
auto l4 = add_common_op(*mm, migraphx::make_op("mul"), {l1, l3});
auto scale_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", dims}}), scale);
auto bias_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", dims}}), bias);
auto l5 = mm->add_instruction(migraphx::make_op("mul"), l4, scale_bcast);
mm->add_instruction(migraphx::make_op("add"), l5, bias_bcast);
auto ret = mm->add_instruction(migraphx::make_op("add"), l5, bias_bcast);
mm->add_return({ret});
migraphx::onnx_options options;
auto prog = migraphx::parse_onnx("instance_norm_test.onnx", options);
EXPECT(p == prog);
}
// Parser test: InstanceNormalization with a dynamic 0'th (batch) dimension.
// Builds the expected program by hand and compares it to the program
// produced by parse_onnx with a matching default dynamic dimension.
TEST_CASE(instance_norm_dyn_batch_test)
{
    // instancenorm with dynamic input in the 0'th (batch) dimension
    migraphx::shape s1{migraphx::shape::float_type, {{1, 2, {2}}, {2, 2}, {3, 3}, {3, 3}}};
    migraphx::shape s2{migraphx::shape::float_type, {2}};
    migraphx::program p;
    auto* mm   = p.get_main_module();
    auto x     = mm->add_parameter("0", s1);
    auto scale = mm->add_parameter("1", s2);
    auto bias  = mm->add_parameter("2", s2);
    // mean and variance are reduced over the spatial axes (2, 3)
    auto mean = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), x);
    auto l1 = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
    auto l0 = add_common_op(*mm, migraphx::make_op("sqdiff"), {x, mean});
    auto variance = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), l0);
    auto epsilon_literal =
        mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type}, {1e-5}});
    auto l2 = add_common_op(*mm, migraphx::make_op("add"), {variance, epsilon_literal});
    auto l3 = mm->add_instruction(migraphx::make_op("rsqrt"), l2);
    auto l4 = add_common_op(*mm, migraphx::make_op("mul"), {l1, l3});
    // two-input broadcast form: output lens follow the (dynamic) shape of x
    auto scale_bcast = mm->add_instruction(migraphx::make_op("broadcast", {{"axis", 1}}), scale, x);
    auto bias_bcast  = mm->add_instruction(migraphx::make_op("broadcast", {{"axis", 1}}), bias, x);
    auto l5  = mm->add_instruction(migraphx::make_op("mul"), l4, scale_bcast);
    auto ret = mm->add_instruction(migraphx::make_op("add"), l5, bias_bcast);
    mm->add_return({ret});
    // BUGFIX: removed stray `auto prog = optimize_onnx("instance_norm_test.onnx");`
    // (diff leftover) which redeclared `prog` — a compile error — and referenced
    // the wrong (static-shape) model file.
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 2, {2}};
    auto prog = migraphx::parse_onnx("instance_norm_dyn_batch_test.onnx", options);
    EXPECT(p == prog);
}
...
...
@@ -3212,6 +3248,7 @@ TEST_CASE(instance_norm_half_test)
auto scale_fp16 = mm->add_parameter("1", s2);
auto bias_fp16 = mm->add_parameter("2", s2);
// conversion of half type to float is enabled by default
auto x = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), x_fp16);
auto scale = mm->add_instruction(
...
...
@@ -3220,20 +3257,19 @@ TEST_CASE(instance_norm_half_test)
migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), bias_fp16);
auto mean = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), x);
auto mean_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), mean);
auto l0 = mm->add_instruction(migraphx::make_op("sub"), x, mean_bcast);
auto l1 = mm->add_instruction(migraphx::make_op("sqdiff"), x, mean_bcast);
auto l0 = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto l1 = add_common_op(*mm, migraphx::make_op("sqdiff"), {x, mean});
auto variance = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), l1);
// type of epsilon_literal is same as 0'th input; convert instruction will be added by
// add_common_op
auto epsilon_literal =
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type}, {1e-5}});
auto epsilon_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", dims}}), epsilon_literal);
auto variance_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), variance);
auto l2 = mm->add_instruction(migraphx::make_op("add"), variance_bcast, epsilon_bcast);
auto l2 = add_common_op(*mm, migraphx::make_op("add"), {variance, epsilon_literal});
auto l3 = mm->add_instruction(migraphx::make_op("rsqrt"), l2);
auto l4 = mm->add_instruction(migraphx::make_op("mul"), l0, l3);
auto l4 = add_common_op(*mm, migraphx::make_op("mul"), {l0, l3});
auto scale_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", dims}}), scale);
auto bias_bcast = mm->add_instruction(
...
...
@@ -3247,6 +3283,55 @@ TEST_CASE(instance_norm_half_test)
EXPECT(p == prog);
}
// Parser test: InstanceNormalization with half (fp16) inputs and a dynamic
// 0'th (batch) dimension. Builds the expected program by hand — fp16 inputs
// converted to float, normalized, then converted back — and compares it to
// the program produced by parse_onnx.
TEST_CASE(instance_norm_dyn_batch_half_test)
{
// instancenorm with half type, dynamic input in the 0'th (batch) dimension
migraphx::shape s1{migraphx::shape::half_type, {{1, 2, {2}}, {2, 2}, {3, 3}, {3, 3}}};
migraphx::shape s2{migraphx::shape::half_type, {2}};
migraphx::program p;
auto* mm = p.get_main_module();
auto x_fp16 = mm->add_parameter("0", s1);
auto scale_fp16 = mm->add_parameter("1", s2);
auto bias_fp16 = mm->add_parameter("2", s2);
// conversion of half type to float is enabled by default
auto x = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), x_fp16);
auto scale = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), scale_fp16);
auto bias = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), bias_fp16);
// mean and variance are reduced over the spatial axes (2, 3)
auto mean = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), x);
auto l0 = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
auto l1 = add_common_op(*mm, migraphx::make_op("sqdiff"), {x, mean});
auto variance = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2, 3}}}), l1);
// type of epsilon_literal is same as 0'th input; convert instruction will be added by
// add_common_op
auto epsilon_literal =
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type}, {1e-5}});
auto l2 = add_common_op(*mm, migraphx::make_op("add"), {variance, epsilon_literal});
auto l3 = mm->add_instruction(migraphx::make_op("rsqrt"), l2);
auto l4 = add_common_op(*mm, migraphx::make_op("mul"), {l0, l3});
// two-input broadcast form: output lens follow the (dynamic) shape of x
auto scale_bcast = mm->add_instruction(migraphx::make_op("broadcast", {{"axis", 1}}), scale, x);
auto bias_bcast = mm->add_instruction(migraphx::make_op("broadcast", {{"axis", 1}}), bias, x);
auto l5 = mm->add_instruction(migraphx::make_op("mul"), l4, scale_bcast);
auto instance_norm_fp32 = mm->add_instruction(migraphx::make_op("add"), l5, bias_bcast);
// result is converted back to the original half type
auto ret = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}),
instance_norm_fp32);
mm->add_return({ret});
// parse with the same dynamic batch range used for s1 above
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 2, {2}};
auto prog = migraphx::parse_onnx("instance_norm_dyn_batch_half_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(instance_norm_type_mismatch_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("instance_norm_type_mismatch_test.onnx"); }));
...
...
test/onnx/verify_onnx.cpp
View file @
2ab23275
...
...
@@ -880,6 +880,48 @@ TEST_CASE(instance_norm_test)
EXPECT
(
migraphx
::
verify_range
(
result_vector
,
gold
));
}
// Runtime test: evaluate the dynamic-batch instancenorm model on the "ref"
// target and compare the output against precomputed gold values.
TEST_CASE(instance_norm_dyn_batch_test)
{
    migraphx::program p = migraphx::parse_onnx("instance_norm_dyn_batch_test.onnx");
    p.compile(migraphx::make_target("ref"));

    // input tensor 1x2x3x3: both channels hold the values 0..8
    migraphx::shape s0{migraphx::shape::float_type, {1, 2, 3, 3}};
    std::vector<float> data0 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8};
    // per-channel scale {1, 2} and bias {0, 1}
    migraphx::shape s1{migraphx::shape::float_type, {2}};
    std::vector<float> data1 = {1, 2};
    migraphx::shape s2{migraphx::shape::float_type, {2}};
    std::vector<float> data2 = {0, 1};

    migraphx::parameter_map pp;
    pp["0"] = migraphx::argument(s0, data0.data());
    pp["1"] = migraphx::argument(s1, data1.data());
    pp["2"] = migraphx::argument(s2, data2.data());

    auto result = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    // channel 0: normalized only; channel 1: normalized, scaled by 2, shifted by 1
    std::vector<float> gold = {-1.54919,
                               -1.16189,
                               -0.774596,
                               -0.387298,
                               0,
                               0.387298,
                               0.774596,
                               1.16189,
                               1.54919,
                               -2.09838,
                               -1.32379,
                               -0.549192,
                               0.225404,
                               1,
                               1.7746,
                               2.54919,
                               3.32379,
                               4.09838};
    EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE
(
instance_norm_3d_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"instance_norm_val_3d_test.onnx"
);
...
...
test/op_shape_test.cpp
View file @
2ab23275
...
...
@@ -187,6 +187,14 @@ TEST_CASE(broadcast_axis_out_of_range_error)
throws_shape
(
migraphx
::
make_op
(
"broadcast"
,
{{
"axis"
,
4
},
{
"out_lens"
,
lens
}}),
input
);
}
// broadcast doesn't support single dynamic shape input; shape computation
// should throw.
TEST_CASE(broadcast_1in_dyn_error)
{
    std::vector<std::size_t> lens{3, 2, 4, 3};
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {4, 4}, {2, 2}}};
    throws_shape(migraphx::make_op("broadcast", {{"axis", 2}, {"out_lens", lens}}), input);
}
TEST_CASE
(
broadcast_2in_static_static
)
{
migraphx
::
shape
a_input
{
migraphx
::
shape
::
float_type
,
{
4
},
{
1
}};
...
...
@@ -1434,6 +1442,14 @@ TEST_CASE(multibroadcast)
}
}
// multibroadcast doesn't support single dynamic shape input; shape
// computation should throw.
TEST_CASE(multibroadcast_1in_dyn_error_0)
{
    std::vector<std::size_t> lens{4, 4, 1, 3};
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {4, 4}, {4, 4}}};
    throws_shape(migraphx::make_op("multibroadcast", {{"out_lens", lens}}), input);
}
TEST_CASE
(
multibroadcast_2in_static_dyn0
)
{
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
float_type
,
{
4
,
4
}};
...
...
@@ -2208,6 +2224,119 @@ TEST_CASE(reshape_shape)
}
}
// This uses the permutation to compute the reshape since its simpler than
// trying to calculate strides. As we collapse or expand dimensions, we
// remove the collapsed dimensions or duplicate the expanded dimensions in
// the permutation. Then we renumber the permutation. So for dimensions of 4,
// 24, 1, 1, 1 with a permutation of 1, 0, 2, 3, 4 that reshapes to 4, 1, 3,
// 4, 2, we first remove the collapsed dimensions or duplicate the expanded
// dimensions which gives 1, 0, 0, 0, 0. Then after renumbering we get a
// final permutation of 4, 0, 1, 2, 3.
TEST_CASE(reshape_nonstandard)
{
    // non-standard (transposed) input built from a permutation
    auto input = migraphx::shape::from_permutation(migraphx::shape::float_type,
                                                   {4, 24, 1, 1, 1},
                                                   migraphx::invert_permutation({1, 0, 2, 3, 4}));
    // each entry: target dims paired with the expected output permutation
    std::vector<std::pair<std::vector<std::size_t>, std::vector<int64_t>>> tests{
        {{4, 24}, {1, 0}},
        {{4, 24, 1, 1, 1, 1}, {1, 0, 2, 3, 4, 5}},
        {{4, 8, 3, 1, 1}, {2, 0, 1, 3, 4}},
        {{4, 1, 3, 4, 2}, {4, 0, 1, 2, 3}},
        {{4, 1, 4, 3, 2}, {4, 0, 1, 2, 3}},
        {{4, 2, 4, 3}, {3, 0, 1, 2}},
        {{4, 2, 12, 1}, {2, 0, 1, 3}},
        {{4, 2, 1, 12}, {3, 0, 1, 2}},
        {{4, 4, 2, 3}, {3, 0, 1, 2}},
        {{4, 8, 1, 3}, {3, 0, 1, 2}},
        {{4, 8, 3, 1}, {2, 0, 1, 3}}};
    for(const auto& [dims, perm] : tests)
    {
        migraphx::shape output = migraphx::shape::from_permutation(
            migraphx::shape::float_type, dims, migraphx::invert_permutation(perm));
        expect_shape(output, migraphx::make_op("reshape", {{"dims", dims}}), input);
    }
}
// Reshape that merges the two middle axes of a transposed (non-standard)
// input; the result keeps an equivalent permutation.
TEST_CASE(reshape_nonstandard_squeeze)
{
    auto input = migraphx::shape::from_permutation(migraphx::shape::float_type,
                                                   {2, 16, 16, 1280},
                                                   migraphx::invert_permutation({0, 2, 3, 1}));
    std::vector<std::size_t> lens = {2, 256, 1280};
    migraphx::shape output        = migraphx::shape::from_permutation(
        migraphx::shape::float_type, lens, migraphx::invert_permutation({0, 2, 1}));
    expect_shape(output, migraphx::make_op("reshape", {{"dims", lens}}), input);
}
// Invalid reshapes of a non-standard input: wrong element counts, multiple
// -1 dims, zero dims, and rank mismatches must all throw.
TEST_CASE(reshape_nonstandard_error)
{
    auto input = migraphx::shape::from_permutation(migraphx::shape::float_type,
                                                   {4, 24, 1, 1, 1},
                                                   migraphx::invert_permutation({1, 0, 2, 3, 4}));
    for(auto&& new_shape : std::vector<std::vector<int64_t>>{{4, 8, 3, 2, 2},
                                                             {1},
                                                             {4, 8, 4},
                                                             {4, 24, 1, 1, 1, 1, 2},
                                                             {8, 4, 4},
                                                             {4, 1, 3, -1, -1},
                                                             {4, 3, 0},
                                                             {4, 3, 2},
                                                             {3, 0},
                                                             {3, 2}})
    {
        throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
    }
}
// Unsqueeze-style reshape of a non-packed input (strides {32, 2}): axis 1 is
// split into 2x8 and the strides are carried through.
TEST_CASE(reshape_nonpacked_unsqueeze1)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 16}, {32, 2}};
    migraphx::shape output{migraphx::shape::float_type, {4, 2, 8}, {32, 16, 2}};
    expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
// Unsqueeze-style reshape of a non-packed input (strides {32, 2}): axis 0 is
// split into 2x2 and the strides are carried through.
TEST_CASE(reshape_nonpacked_unsqueeze2)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 16}, {32, 2}};
    migraphx::shape output{migraphx::shape::float_type, {2, 2, 16}, {64, 32, 2}};
    expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
// Squeeze-style reshape of a non-packed input: both axes collapse to a
// single dimension of 64 with the element stride 2 preserved.
TEST_CASE(reshape_nonpacked_squeeze)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 16}, {32, 2}};
    migraphx::shape output{migraphx::shape::float_type, {64}, {2}};
    expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
// Unsqueeze a broadcast input (zero strides): splitting the broadcast axis
// 256 -> 16x16 keeps zero strides on the broadcast dimensions.
TEST_CASE(reshape_broadcast_unsqueeze1)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
    migraphx::shape output{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
    expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
// Unsqueeze a broadcast input (zero strides): splitting the non-broadcast
// axis 1280 -> 16x80 yields real strides on the split dimensions.
TEST_CASE(reshape_broadcast_unsqueeze2)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
    migraphx::shape output{migraphx::shape::float_type, {2, 256, 16, 80}, {0, 0, 80, 1}};
    expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
// Squeeze a broadcast input (zero strides): merging the two broadcast axes
// 16x16 -> 256 keeps a zero stride on the merged dimension.
TEST_CASE(reshape_broadcast_squeeze)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
    migraphx::shape output{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
    expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
// Merging a broadcast axis with a non-broadcast axis (16 * 1280 -> 20480)
// cannot be expressed with strides alone, so the reshape must throw.
TEST_CASE(reshape_broadcast_squeeze_error)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
    std::vector<int64_t> new_shape = {2, 16, 20480};
    throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}
TEST_CASE
(
reshape_dyn_shape
)
{
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
24
,
24
},
{
1
,
1
},
{
1
,
1
}}};
...
...
Prev
1
2
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment