Commit b41a56cf authored by turneram

Merge remote-tracking branch 'origin/develop' into ck-proto

parents cf1172c8 1704bb04
@@ -26,8 +26,9 @@
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/fpga/target.hpp>
#include <migraphx/target_assignments.hpp>
#include <migraphx/iterator_for.hpp>
migraphx::program create_program()
{
@@ -37,8 +38,8 @@ migraphx::program create_program()
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto z = mm->add_parameter("z", s);
auto diff = mm->add_instruction(migraphx::make_op("div"), x, y);
mm->add_instruction(migraphx::make_op("div"), diff, z);
auto diff = mm->add_instruction(migraphx::make_op("add"), x, y);
mm->add_instruction(migraphx::make_op("add"), diff, z);
return p;
}
@@ -47,14 +48,16 @@ TEST_CASE(is_supported)
auto p = create_program();
auto targets = migraphx::get_targets();
EXPECT(!targets.empty());
auto first_target = targets[0];
auto t = migraphx::make_target(first_target);
auto t = migraphx::make_target("fpga");
const auto assignments = p.get_target_assignments({t});
for(const auto& [ins, target] : assignments)
const auto* mod = p.get_main_module();
EXPECT(mod->size() == assignments.size());
for(const auto ins : iterator_for(*mod))
{
(void)ins;
EXPECT(target == first_target);
const auto& target = assignments.at(ins);
EXPECT(target == "fpga");
}
}
......
@@ -108,15 +108,7 @@ struct function
};
template <class Stream, class Iterator>
inline Stream& stream_range(Stream& s, Iterator start, Iterator last)
{
if(start != last)
{
s << *start;
std::for_each(std::next(start), last, [&](auto&& x) { s << ", " << x; });
}
return s;
}
Stream& stream_range(Stream& s, Iterator start, Iterator last);
template <class Stream>
inline Stream& operator<<(Stream& s, std::nullptr_t)
@@ -136,6 +128,17 @@ inline auto operator<<(Stream& s, const Range& v) -> decltype(stream_range(s, v.
return s;
}
template <class Stream, class Iterator>
inline Stream& stream_range(Stream& s, Iterator start, Iterator last)
{
if(start != last)
{
s << *start;
std::for_each(std::next(start), last, [&](auto&& x) { s << ", " << x; });
}
return s;
}
template <class T>
const T& get_value(const T& x)
{
......
@@ -3589,7 +3589,7 @@ def nms_test():
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[6, 3])
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
@@ -3603,6 +3603,108 @@ def nms_test():
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_use_dyn_output_false_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, 6, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT, [1, 1, 6])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'],
use_dyn_output=0)
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_dynamic_batch_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [None, 6, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
[None, 1, 6])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'],
center_point_box=1,
use_dyn_output=1)
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_dynamic_boxes_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, None, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
[1, 1, None])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'])
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_dynamic_classes_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, 6, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
[1, None, 6])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'])
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def not_test():
x = helper.make_tensor_value_info('0', TensorProto.INT32, [4])
......
@@ -954,13 +954,11 @@ TEST_CASE(conv_dynamic_img_same_upper)
TEST_CASE(conv_dynamic_kernel_same_lower)
{
std::cout << "here1\n";
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
std::cout << "here2\n";
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}},
@@ -970,12 +968,10 @@ TEST_CASE(conv_dynamic_kernel_same_lower)
{"use_dynamic_same_auto_pad", true}}),
l0,
l1);
std::cout << "here3\n";
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 4, 0};
std::cout << "here\n";
auto prog = migraphx::parse_onnx("conv_dynamic_kernel_same_lower_test.onnx", options);
EXPECT(p == prog);
}
@@ -3382,13 +3378,127 @@ TEST_CASE(nms_test)
auto st = mm->add_parameter("score_threshold", sst);
auto ret = mm->add_instruction(
migraphx::make_op("nonmaxsuppression", {{"center_point_box", 1}}), b, s, mo, iou, st);
migraphx::make_op("nonmaxsuppression", {{"center_point_box", true}}), b, s, mo, iou, st);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("nms_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(nms_dynamic_batch_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {{1, 10, 0}, {6, 6, 0}, {4, 4, 0}}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {{1, 10, 0}, {1, 1, 0}, {6, 6, 0}}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
migraphx::shape siou{migraphx::shape::float_type, {1}};
auto iou = mm->add_parameter("iou_threshold", siou);
migraphx::shape sst{migraphx::shape::float_type, {1}};
auto st = mm->add_parameter("score_threshold", sst);
auto ret = mm->add_instruction(
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
b,
s,
mo,
iou,
st);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_dynamic_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(nms_dynamic_boxes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {{1, 1, 0}, {6, 20, 0}, {4, 4, 0}}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {{1, 1, 0}, {1, 1, 0}, {6, 20, 0}}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
migraphx::shape siou{migraphx::shape::float_type, {1}};
auto iou = mm->add_parameter("iou_threshold", siou);
migraphx::shape sst{migraphx::shape::float_type, {1}};
auto st = mm->add_parameter("score_threshold", sst);
auto ret = mm->add_instruction(
migraphx::make_op("nonmaxsuppression", {{"use_dyn_output", true}}), b, s, mo, iou, st);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {6, 20, 0};
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_dynamic_boxes_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(nms_dynamic_classes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {1, 6, 4}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {{1, 1, 0}, {1, 10, 0}, {6, 6, 0}}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
migraphx::shape siou{migraphx::shape::float_type, {1}};
auto iou = mm->add_parameter("iou_threshold", siou);
migraphx::shape sst{migraphx::shape::float_type, {1}};
auto st = mm->add_parameter("score_threshold", sst);
auto ret = mm->add_instruction(
migraphx::make_op("nonmaxsuppression", {{"use_dyn_output", true}}), b, s, mo, iou, st);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_dynamic_classes_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(nms_overwrite_use_dyn_output_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {1, 6, 4}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {1, 1, 6}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
migraphx::shape siou{migraphx::shape::float_type, {1}};
auto iou = mm->add_parameter("iou_threshold", siou);
migraphx::shape sst{migraphx::shape::float_type, {1}};
auto st = mm->add_parameter("score_threshold", sst);
auto ret = mm->add_instruction(
migraphx::make_op("nonmaxsuppression", {{"use_dyn_output", true}}), b, s, mo, iou, st);
mm->add_return({ret});
migraphx::onnx_options options;
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_use_dyn_output_false_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(nonzero_dynamic_test)
{
migraphx::program p;
......
@@ -1135,6 +1135,149 @@ TEST_CASE(multinomial)
throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), s, s);
}
TEST_CASE(nms_shape)
{
// use_dyn_output == false
migraphx::shape boxes_s{migraphx::shape::float_type, {1, 6, 4}};
migraphx::shape scores_s{migraphx::shape::float_type, {1, 1, 6}};
migraphx::shape max_out_s{migraphx::shape::int64_type, {1}};
migraphx::shape iou_thres_s{migraphx::shape::float_type, {1}};
migraphx::shape score_thres_s{migraphx::shape::float_type, {1}};
migraphx::shape output_s{migraphx::shape::int64_type, {6, 3}};
expect_shape(output_s,
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", false}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// use_dyn_output == true
output_s = {migraphx::shape::int64_type, {{0, 6, 0}, {3, 3, 0}}};
expect_shape(output_s,
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// dynamic batches
boxes_s = {migraphx::shape::float_type, {{1, 3, 0}, {6, 6, 0}, {4, 4, 0}}};
scores_s = {migraphx::shape::float_type, {{1, 3, 0}, {1, 1, 0}, {6, 6, 0}}};
output_s = {migraphx::shape::int64_type, {{0, 18, 0}, {3, 3, 0}}};
expect_shape(output_s,
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// dynamic num boxes
boxes_s = {migraphx::shape::float_type, {{1, 1, 0}, {6, 20, 0}, {4, 4, 0}}};
scores_s = {migraphx::shape::float_type, {{1, 1, 0}, {1, 1, 0}, {6, 20, 0}}};
output_s = {migraphx::shape::int64_type, {{0, 20, 0}, {3, 3, 0}}};
expect_shape(output_s,
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// use_dyn_output false with dynamic input shape
throws_shape(migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", false}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// dynamic classes
boxes_s = {migraphx::shape::float_type, {{1, 1, 0}, {6, 6, 0}, {4, 4, 0}}};
scores_s = {migraphx::shape::float_type, {{1, 1, 0}, {1, 3, 0}, {6, 6, 0}}};
output_s = {migraphx::shape::int64_type, {{0, 6, 0}, {3, 3, 0}}};
expect_shape(output_s,
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// fixed mismatch batches
boxes_s = {migraphx::shape::float_type, {2, 6, 4}};
scores_s = {migraphx::shape::float_type, {1, 1, 6}};
throws_shape(migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// fixed mismatch num boxes
boxes_s = {migraphx::shape::float_type, {1, 6, 4}};
scores_s = {migraphx::shape::float_type, {1, 1, 4}};
throws_shape(migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// dynamic mismatch batches
boxes_s = {migraphx::shape::float_type, {{1, 4, 0}, {6, 6, 0}, {4, 4, 0}}};
scores_s = {migraphx::shape::float_type, {{2, 8, 0}, {1, 1, 0}, {6, 6, 0}}};
throws_shape(migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// dynamic mismatch num boxes
boxes_s = {migraphx::shape::float_type, {{1, 1, 0}, {6, 8, 0}, {4, 4, 0}}};
scores_s = {migraphx::shape::float_type, {{1, 1, 0}, {1, 1, 0}, {3, 9, 0}}};
throws_shape(migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// dynamic number of classes, fixed boxes_s, mismatch batches
boxes_s = {migraphx::shape::float_type, {1, 6, 4}};
scores_s = {migraphx::shape::float_type, {{1, 3, 0}, {1, 3, 0}, {6, 6, 0}}};
throws_shape(migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
// dynamic number of classes, fixed boxes_s, mismatch num boxes
boxes_s = {migraphx::shape::float_type, {1, 6, 4}};
scores_s = {migraphx::shape::float_type, {{1, 1, 0}, {1, 3, 0}, {4, 8, 0}}};
throws_shape(migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_s,
scores_s,
max_out_s,
iou_thres_s,
score_thres_s);
}
TEST_CASE(pooling_shape)
{
migraphx::shape output{migraphx::shape::float_type, {4, 3, 1, 1}};
......
@@ -3624,6 +3624,174 @@ TEST_CASE(neg_test)
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(nms_dynamic_out_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape boxes_s{migraphx::shape::float_type, {1, 6, 4}};
std::vector<float> boxes_vec = {0.5, 0.5, 1.0, 1.0, 0.5, 0.6, 1.0, 1.0, 0.5, 0.4, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0, 0.5, 10.6, 1.0, 1.0, 0.5, 100.5, 1.0, 1.0};
migraphx::shape scores_s{migraphx::shape::float_type, {1, 1, 6}};
std::vector<float> scores_vec = {0.9, 0.75, 0.6, 0.95, 0.5, 0.3};
auto boxes_l = mm->add_literal(migraphx::literal(boxes_s, boxes_vec));
auto scores_l = mm->add_literal(migraphx::literal(scores_s, scores_vec));
auto max_out_l = mm->add_literal(int64_t{4});
auto iou_threshold = mm->add_literal(0.5f);
auto score_threshold = mm->add_literal(0.0f);
auto r = mm->add_instruction(
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_l,
scores_l,
max_out_l,
iou_threshold,
score_threshold);
mm->add_return({r});
p.compile(migraphx::ref::target{});
auto output = p.eval({}).back();
std::vector<int64_t> result;
output.visit([&](auto out) { result.assign(out.begin(), out.end()); });
std::vector<int64_t> gold = {0, 0, 3, 0, 0, 0, 0, 0, 5};
EXPECT(migraphx::verify_range(result, gold));
}
TEST_CASE(nms_dynamic_batch_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape boxes_s{migraphx::shape::float_type, {{1, 3, 0}, {6, 6, 0}, {4, 4, 0}}};
migraphx::shape scores_s{migraphx::shape::float_type, {{1, 3, 0}, {1, 1, 0}, {6, 6, 0}}};
auto boxes_p = mm->add_parameter("boxes", boxes_s);
auto scores_p = mm->add_parameter("scores", scores_s);
auto max_out_l = mm->add_literal(int64_t{4});
auto iou_threshold = mm->add_literal(0.5f);
auto score_threshold = mm->add_literal(0.0f);
auto r = mm->add_instruction(
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_p,
scores_p,
max_out_l,
iou_threshold,
score_threshold);
mm->add_return({r});
p.compile(migraphx::ref::target{});
std::vector<float> boxes_vec = {0.5, 0.5, 1.0, 1.0, 0.5, 0.6, 1.0, 1.0, 0.5, 0.4, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0, 0.5, 10.6, 1.0, 1.0, 0.5, 100.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0, 0.5, 0.6, 1.0, 1.0, 0.5, 0.4, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0, 0.5, 10.6, 1.0, 1.0, 0.5, 100.5, 1.0, 1.0};
std::vector<float> scores_vec = {
0.9, 0.75, 0.6, 0.95, 0.5, 0.3, 0.9, 0.75, 0.6, 0.95, 0.5, 0.3};
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 6, 4}};
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {2, 1, 6}};
migraphx::parameter_map params0;
params0["boxes"] = migraphx::argument(input_fixed_shape0, boxes_vec.data());
params0["scores"] = migraphx::argument(input_fixed_shape1, scores_vec.data());
auto output = p.eval(params0).back();
std::vector<int64_t> result;
output.visit([&](auto out) { result.assign(out.begin(), out.end()); });
std::vector<int64_t> gold = {0, 0, 3, 0, 0, 0, 0, 0, 5, 1, 0, 3, 1, 0, 0, 1, 0, 5};
EXPECT(migraphx::verify_range(result, gold));
}
TEST_CASE(nms_dynamic_boxes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape boxes_s{migraphx::shape::float_type, {{1, 1, 0}, {4, 20, 0}, {4, 4, 0}}};
migraphx::shape scores_s{migraphx::shape::float_type, {{1, 1, 0}, {1, 1, 0}, {4, 20, 0}}};
auto boxes_p = mm->add_parameter("boxes", boxes_s);
auto scores_p = mm->add_parameter("scores", scores_s);
auto max_out_l = mm->add_literal(int64_t{4});
auto iou_threshold = mm->add_literal(0.5f);
auto score_threshold = mm->add_literal(0.0f);
auto r = mm->add_instruction(
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_p,
scores_p,
max_out_l,
iou_threshold,
score_threshold);
mm->add_return({r});
p.compile(migraphx::ref::target{});
std::vector<float> boxes_vec = {0.5, 0.5, 1.0, 1.0, 0.5, 0.6, 1.0, 1.0, 0.5, 0.4, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0, 0.5, 10.6, 1.0, 1.0, 0.5, 100.5, 1.0, 1.0};
std::vector<float> scores_vec = {0.9, 0.75, 0.6, 0.95, 0.5, 0.3};
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {1, 6, 4}};
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {1, 1, 6}};
migraphx::parameter_map params0;
params0["boxes"] = migraphx::argument(input_fixed_shape0, boxes_vec.data());
params0["scores"] = migraphx::argument(input_fixed_shape1, scores_vec.data());
auto output = p.eval(params0).back();
std::vector<int64_t> result;
output.visit([&](auto out) { result.assign(out.begin(), out.end()); });
std::vector<int64_t> gold = {0, 0, 3, 0, 0, 0, 0, 0, 5};
EXPECT(migraphx::verify_range(result, gold));
}
TEST_CASE(nms_dynamic_classes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape boxes_s{migraphx::shape::float_type, {{1, 1, 0}, {6, 6, 0}, {4, 4, 0}}};
migraphx::shape scores_s{migraphx::shape::float_type, {{1, 1, 0}, {1, 3, 0}, {6, 6, 0}}};
auto boxes_p = mm->add_parameter("boxes", boxes_s);
auto scores_p = mm->add_parameter("scores", scores_s);
auto max_out_l = mm->add_literal(int64_t{2});
auto iou_threshold = mm->add_literal(0.5f);
auto score_threshold = mm->add_literal(0.0f);
auto r = mm->add_instruction(
migraphx::make_op("nonmaxsuppression",
{{"center_point_box", true}, {"use_dyn_output", true}}),
boxes_p,
scores_p,
max_out_l,
iou_threshold,
score_threshold);
mm->add_return({r});
p.compile(migraphx::ref::target{});
std::vector<float> boxes_vec = {0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1,
0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0,
0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0};
std::vector<float> scores_vec = {
0.9, 0.75, 0.6, 0.95, 0.5, 0.3, 0.9, 0.75, 0.6, 0.95, 0.5, 0.3};
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {1, 6, 4}};
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {1, 2, 6}};
migraphx::parameter_map params0;
params0["boxes"] = migraphx::argument(input_fixed_shape0, boxes_vec.data());
params0["scores"] = migraphx::argument(input_fixed_shape1, scores_vec.data());
auto output = p.eval(params0).back();
std::vector<int64_t> result;
output.visit([&](auto out) { result.assign(out.begin(), out.end()); });
std::vector<int64_t> gold = {0, 0, 3, 0, 0, 0, 0, 1, 3, 0, 1, 0};
EXPECT(migraphx::verify_range(result, gold));
}
TEST_CASE(nms_not_center_test)
{
migraphx::program p;
@@ -3642,12 +3810,14 @@ TEST_CASE(nms_not_center_test)
auto iou_threshold = mm->add_literal(0.5f);
auto score_threshold = mm->add_literal(0.0f);
auto r = mm->add_instruction(migraphx::make_op("nonmaxsuppression"),
boxes_l,
scores_l,
max_out_l,
iou_threshold,
score_threshold);
// set use_dyn_output back to false in operator map
auto r =
mm->add_instruction(migraphx::make_op("nonmaxsuppression", {{"use_dyn_output", false}}),
boxes_l,
scores_l,
max_out_l,
iou_threshold,
score_threshold);
mm->add_return({r});
p.compile(migraphx::ref::target{});
@@ -3675,12 +3845,13 @@ TEST_CASE(nms_test)
auto iou_threshold = mm->add_literal(0.5f);
auto score_threshold = mm->add_literal(0.0f);
auto r = mm->add_instruction(migraphx::make_op("nonmaxsuppression", {{"center_point_box", 1}}),
boxes_l,
scores_l,
max_out_l,
iou_threshold,
score_threshold);
auto r =
mm->add_instruction(migraphx::make_op("nonmaxsuppression", {{"center_point_box", true}}),
boxes_l,
scores_l,
max_out_l,
iou_threshold,
score_threshold);
mm->add_return({r});
p.compile(migraphx::ref::target{});
@@ -3712,12 +3883,13 @@ TEST_CASE(nms_transpose1_test)
auto transpose_boxes = mm->add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), t_boxes_l);
auto r = mm->add_instruction(migraphx::make_op("nonmaxsuppression", {{"center_point_box", 1}}),
transpose_boxes,
scores_l,
max_out_l,
iou_threshold,
score_threshold);
auto r =
mm->add_instruction(migraphx::make_op("nonmaxsuppression", {{"center_point_box", true}}),
transpose_boxes,
scores_l,
max_out_l,
iou_threshold,
score_threshold);
mm->add_return({r});
p.compile(migraphx::ref::target{});
@@ -3749,12 +3921,13 @@ TEST_CASE(nms_transpose2_test)
auto transpose_boxes = mm->add_instruction(
migraphx::make_op("transpose", {{"permutation", {1, 2, 0}}}), t_boxes_l);
auto r = mm->add_instruction(migraphx::make_op("nonmaxsuppression", {{"center_point_box", 1}}),
transpose_boxes,
scores_l,
max_out_l,
iou_threshold,
score_threshold);
auto r =
mm->add_instruction(migraphx::make_op("nonmaxsuppression", {{"center_point_box", true}}),
transpose_boxes,
scores_l,
max_out_l,
iou_threshold,
score_threshold);
mm->add_return({r});
p.compile(migraphx::ref::target{});
......
@@ -39,6 +39,15 @@ void run_pass(migraphx::module& m)
migraphx::run_passes(m, {migraphx::simplify_reshapes{}, migraphx::dead_code_elimination{}});
}
inline std::vector<std::vector<std::size_t>> to_lens(const std::vector<migraphx::shape>& shapes)
{
std::vector<std::vector<std::size_t>> result;
std::transform(shapes.begin(), shapes.end(), std::back_inserter(result), [&](const auto& s) {
return s.lens();
});
return result;
}
TEST_CASE(double_contig)
{
migraphx::program p;
@@ -1275,4 +1284,82 @@ TEST_CASE(transpose_slice_single_transpose)
EXPECT(m1 == m2);
}
TEST_CASE(transpose_slice_non_packed_axis)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::float_type, {2, 384, 36, 64}});
auto transpose =
m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), x);
auto slice = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {12}}}),
transpose);
auto sqrt = m1.add_instruction(migraphx::make_op("sqrt"), slice);
m1.add_return({sqrt});
}
auto output_shapes = m1.get_output_shapes();
run_pass(m1);
EXPECT(m1.get_output_shapes() == output_shapes);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::float_type, {2, 384, 36, 64}});
auto unsqueeze =
m2.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {12}}}), x);
auto transpose = m2.add_instruction(
migraphx::make_op("transpose", {{"permutation", {3, 0, 2, 1, 4}}}), unsqueeze);
auto slice = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {1}}}), transpose);
auto squeeze = m2.add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), slice);
auto sqrt = m2.add_instruction(migraphx::make_op("sqrt"), squeeze);
m2.add_return({sqrt});
}
EXPECT(m1 == m2);
}
TEST_CASE(transpose_slice_non_packed_multi_axis)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::float_type, {2, 384, 36, 64}});
auto transpose =
m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), x);
auto slice1 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {12}}}),
transpose);
auto slice2 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {12}}, {"ends", {24}}}),
transpose);
auto transpose2 = m1.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), slice2);
auto slice3 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {24}}, {"ends", {36}}}),
transpose);
m1.add_return({slice1, transpose2, slice3});
}
auto output_shapes = m1.get_output_shapes();
run_pass(m1);
EXPECT(to_lens(m1.get_output_shapes()) == to_lens(output_shapes));
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::float_type, {2, 384, 36, 64}});
auto unsqueeze =
m2.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {12}}}), x);
auto transpose = m2.add_instruction(
migraphx::make_op("transpose", {{"permutation", {3, 0, 2, 1, 4}}}), unsqueeze);
auto slice1 = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {1}}}), transpose);
auto squeeze1 = m2.add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), slice1);
auto slice2 = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {2}}}), transpose);
auto squeeze2 = m2.add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), slice2);
auto transpose2 = m2.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), squeeze2);
auto slice3 = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {2}}, {"ends", {3}}}), transpose);
auto squeeze3 = m2.add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), slice3);
m2.add_return({squeeze1, transpose2, squeeze3});
}
EXPECT(m1.sort() == m2.sort());
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -68,7 +68,7 @@ struct test_layernorm : verify_program<test_layernorm>
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<size_t> dims = {1, 1, 5};
std::vector<size_t> dims = {1, 2, 5};
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, dims});
add_layernorm(*mm, x, dims);
return p;
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/common.hpp>
struct test_softmax_large1 : verify_program<test_softmax_large1>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {2, 4}});
auto large = mm->add_literal({migraphx::shape{migraphx::shape::float_type}, {10000}});
auto add = migraphx::add_common_op(*mm, migraphx::make_op("add"), {x, large});
mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), add);
return p;
}
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/common.hpp>
struct test_softmax_large2 : verify_program<test_softmax_large2>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {2, 4}});
auto large = mm->add_literal({migraphx::shape{migraphx::shape::float_type}, {-10000}});
auto add = migraphx::add_common_op(*mm, migraphx::make_op("add"), {x, large});
mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), add);
return p;
}
};
@@ -37,8 +37,10 @@
#include <migraphx/compile_options.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/rank.hpp>
#include <migraphx/module_ref.hpp>
#include <migraphx/support_metric.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/supported_segments.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
@@ -64,12 +66,12 @@ struct target
*/
context get_context() const;
/**
* @brief Check how well an instruction is supported on a target with the given metric
* @param ins Instruction to check if it's supported
* @param metric Used to define how the return value should be interpreted
* @return The value based on the chosen metric. Negative numbers mean unsupported
* @brief Get the ranges of instructions that are supported on a target
* @param module Module to check for supported instructions
* @param metric Used to define how the quality of the support should be measured
* @return the supported segments of the graph
*/
float is_supported(T&, instruction_ref ins, support_metric m) const;
supported_segments target_is_supported(T&, const_module_ref mod, support_metric metric) const;
/**
* @brief copy an argument to the current target.
*
@@ -115,9 +117,9 @@ argument copy_from_target(T&, const argument& arg)
}
template <class T>
float target_is_supported(T&, instruction_ref, support_metric)
supported_segments target_find_supported(T&, const_module_ref, support_metric)
{
return 0;
return {};
}
<%
@@ -125,7 +127,7 @@ interface('target',
virtual('name', returns='std::string', const=True),
virtual('get_passes', ctx='context&', options='const compile_options&', returns='std::vector<pass>', const=True),
virtual('get_context', returns='context', const=True),
virtual('is_supported', returns='float', ins='instruction_ref', m='support_metric', const=True, default='target_is_supported'),
virtual('find_supported', returns='supported_segments', mod='const_module_ref', m='support_metric', const=True, default='target_find_supported'),
virtual('copy_to',
returns = 'argument',
input = 'const argument&',
......
@@ -23,7 +23,9 @@
#####################################################################################
import string, sys, re
trivial = ['std::size_t', 'instruction_ref', 'support_metric']
trivial = [
'std::size_t', 'instruction_ref', 'support_metric', 'const_module_ref'
]
headers = '''
#include <algorithm>
......