Commit 30a0b238 authored by Khalique Ahmed

manual merge

parents 72aabeb5 0b6b33bc
@@ -41,7 +41,15 @@ constexpr auto concat_slice(Output out, Input, Start)
return Start{} * output_shape.strides[Axis];
});
constexpr auto s = make_shape(lens, strides);
return make_tensor_view(&out[offset], s);
// The slice view must start inside the output buffer and fit entirely within it
MIGRAPHX_ASSERT(offset < out.get_shape().element_space());
MIGRAPHX_ASSERT((s.element_space() + offset) <= out.get_shape().element_space());
return make_tensor_view(out.data() + offset, s);
}
template <index_int Axis, class Input, class Start, class... Ts>
constexpr auto concat_slices(Input input, Start start, Ts... xs)
{
// View each tensor in xs through the slice of the output that this input occupies
return [=](auto f) { f(concat_slice<Axis>(xs, input, start)...); };
}
template <index_int Axis, class Input>
@@ -51,15 +59,19 @@ constexpr auto concat_ends(Input)
return _c<lens[Axis]>;
}
template <index_int Axis, class Output, class... Inputs>
__device__ void concat(Output output, Inputs... inputs)
template <index_int Axis, class... Inputs>
__device__ auto concat(Inputs... inputs)
{
auto idx = make_index();
fold([&](auto start, auto input) {
auto y = concat_slice<Axis>(output, input, start);
idx.global_stride(input.get_shape().elements(), [&](auto i) { y[i] = input[i]; });
return start + concat_ends<Axis>(input);
})(_c<0>, inputs...);
return [=](auto f, auto... ts) {
auto idx = make_index();
fold([&](auto start, auto input) {
concat_slices<Axis>(input, start, ts...)([&](auto y, auto... xs) {
idx.global_stride(input.get_shape().elements(),
[&](auto i) { y[i] = f(input[i], xs[i]...); });
});
return start + concat_ends<Axis>(input);
})(_c<0>, inputs...);
};
}
} // namespace migraphx
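
The new concat above returns a callable instead of writing its inputs directly: the caller supplies a pointwise function f plus extra tensors, and each input is combined with matching slices of those tensors as it is written into its region of the output. A rough NumPy sketch of that fused behavior, with hypothetical names (intuition only, not the MIGraphX API):

import numpy as np

def fused_concat(axis, f, inputs, extras, out):
    # Mirror of the kernel's structure: walk the inputs, tracking the running
    # start offset along `axis` (the fold over concat_ends), and write
    # f(input, *extra_slices) into the matching region of `out`.
    start = 0
    for inp in inputs:
        end = start + inp.shape[axis]
        region = [slice(None)] * out.ndim
        region[axis] = slice(start, end)
        # concat_slices: every extra tensor is viewed through the same region
        extra_views = [e[tuple(region)] for e in extras]
        out[tuple(region)] = f(inp, *extra_views)
        start = end
    return out

# concat two inputs on axis 1 while fusing an add with a broadcast bias
a = np.ones((1, 2, 4))
b = np.full((1, 6, 4), 2.0)
bias = np.broadcast_to(np.arange(4.0), (1, 8, 4))
out = np.empty((1, 8, 4))
fused_concat(1, lambda x, e: x + e, [a, b], [bias], out)
assert out.shape == (1, 8, 4)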
@@ -101,7 +101,10 @@ struct mlir_handle
mlir_handle(T p) : handle(ptr{p}) {}
T get() const { return handle.get().get(); }
T get() const
{
return handle.get().get(); // NOLINT(readability-redundant-smartptr-get)
}
T release() { return handle.release().get(); }
@@ -420,6 +420,74 @@ def batch_norm_invalid_bias_rank_test():
    return ([node], [x, scale, bias, mean, var], [out])


@onnx_test
def binary_dyn_brcst_prelu_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                         [None, 3, 4, 5])
    arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
    arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
                                            [None, 3, 4, 5])

    node = onnx.helper.make_node(
        'PRelu',
        inputs=['0', '1'],
        outputs=['out'],
    )

    return ([node], [arg0, arg1], [arg_out])


@onnx_test
def binary_dyn_brcst_add_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [4, 5])
    arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                         [None, 3, 4, 5])
    arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
                                            [None, 3, 4, 5])

    node = onnx.helper.make_node(
        'Add',
        inputs=['0', '1'],
        outputs=['out'],
    )

    return ([node], [arg0, arg1], [arg_out])


@onnx_test
def binary_dyn_brcst_attr_error_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [4, 5])
    arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                         [None, 3, 4, 5])
    arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
                                            [None, 3, 4, 5])

    node = onnx.helper.make_node('Add',
                                 inputs=['0', '1'],
                                 outputs=['out'],
                                 broadcast=1,
                                 axis=1)

    return ([node], [arg0, arg1], [arg_out])


@onnx_test
def binary_dyn_brcst_mul_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                         [None, 3, 4, 5])
    arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 1])
    arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
                                            [None, 3, 4, 5])

    node = onnx.helper.make_node(
        'Mul',
        inputs=['0', '1'],
        outputs=['out'],
    )

    return ([node], [arg0, arg1], [arg_out])


@onnx_test
def cast_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [10])
@@ -5687,6 +5755,92 @@ def split_test_default():
    return ([node], [x], [y1, y2])


@onnx_test
def split_test_no_attribute():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [300, 15])
    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [75, 15])
    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [75, 15])
    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [75, 15])
    y4 = helper.make_tensor_value_info('y4', TensorProto.FLOAT, [75, 15])

    split = np.ones(4) * 75
    split_tensor = helper.make_tensor(name="split",
                                      data_type=TensorProto.INT64,
                                      dims=split.shape,
                                      vals=split.astype(np.int64))
    const_node = helper.make_node("Constant",
                                  inputs=[],
                                  outputs=['split'],
                                  value=split_tensor)

    node = onnx.helper.make_node(
        'Split',
        inputs=['x', 'split'],
        outputs=['y1', 'y2', 'y3', 'y4'],
    )

    return ([const_node, node], [x], [y1, y2, y3, y4])


@onnx_test
def split_test_no_attribute_invalid_split():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [300, 15])
    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [75, 15])
    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [75, 15])
    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [75, 15])
    y4 = helper.make_tensor_value_info('y4', TensorProto.FLOAT, [75, 15])

    # splits of size 1 sum to 4, not 300, so parsing should fail
    split = np.ones(4)
    split_tensor = helper.make_tensor(name="split",
                                      data_type=TensorProto.INT64,
                                      dims=split.shape,
                                      vals=split.astype(np.int64))
    const_node = helper.make_node("Constant",
                                  inputs=[],
                                  outputs=['split'],
                                  value=split_tensor)

    node = onnx.helper.make_node(
        'Split',
        inputs=['x', 'split'],
        outputs=['y1', 'y2', 'y3', 'y4'],
    )

    return ([const_node, node], [x], [y1, y2, y3, y4])


@onnx_test
def split_test_invalid_split():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [10, 7])
    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [10, 4])
    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [10, 4])

    # the splits sum to 3, not 15, so parsing should fail
    node = onnx.helper.make_node('Split',
                                 inputs=['x'],
                                 outputs=['y1', 'y2', 'y3'],
                                 axis=1,
                                 split=[1, 1, 1])

    return ([node], [x], [y1, y2, y3])


@onnx_test
def split_test_no_attribute_invalid_input_split():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [10, 7])
    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [10, 4])
    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [10, 4])

    # an empty split list should be rejected by the parser
    node = onnx.helper.make_node('Split',
                                 inputs=['x'],
                                 outputs=['y1', 'y2', 'y3'],
                                 axis=1,
                                 split=[])

    return ([node], [x], [y1, y2, y3])


@onnx_test
def sqrt_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
@@ -521,6 +521,76 @@ TEST_CASE(batch_norm_invalid_bias_rank)
EXPECT(test::throws([&] { migraphx::parse_onnx("batch_norm_invalid_bias_rank.onnx"); }));
}
TEST_CASE(binary_dyn_brcst_prelu_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
auto l1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {4, 5}});
auto ret = add_common_op(*mm, migraphx::make_op("prelu"), {l0, l1});
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = migraphx::parse_onnx("binary_dyn_brcst_prelu_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(binary_dyn_brcst_add_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, {4, 5}});
auto l1 = mm->add_parameter(
"1",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
auto ret = add_common_op(*mm, migraphx::make_op("add"), {l0, l1});
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = migraphx::parse_onnx("binary_dyn_brcst_add_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(binary_dyn_brcst_attr_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("binary_dyn_brcst_attr_error_test.onnx", options); }));
}
TEST_CASE(binary_dyn_brcst_mul_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
auto l1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {4, 1}});
auto bl1 = mm->add_instruction(
migraphx::make_op("multibroadcast",
{{"out_dyn_dims", to_value(l0->get_shape().dyn_dims())}}),
l1,
l0);
auto ret = mm->add_instruction(migraphx::make_op("mul"), l0, bl1);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = migraphx::parse_onnx("binary_dyn_brcst_mul_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(cast_test)
{
migraphx::program p;
@@ -5537,6 +5607,31 @@ TEST_CASE(split_test)
EXPECT(p == prog);
}
TEST_CASE(split_test_no_attribute)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape si{migraphx::shape::int64_type, {4}, {1}};
std::vector<int> ind = {75, 75, 75, 75};
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {300, 15}});
mm->add_literal(migraphx::literal(si, ind));
auto r1 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {75}}}), input);
auto r2 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {75}}, {"ends", {150}}}), input);
auto r3 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {150}}, {"ends", {225}}}), input);
auto r4 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {225}}, {"ends", {300}}}), input);
mm->add_return({r1, r2, r3, r4});
auto prog = migraphx::parse_onnx("split_test_no_attribute.onnx");
EXPECT(p == prog);
}
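
The expected program above shows how the parser lowers a Split whose sizes come from a constant split input: one slice per output, with starts taken from the running sum of the sizes (0, 75, 150, 225). A NumPy sketch of that lowering, using a hypothetical helper (not the parser's actual code):

import numpy as np

def split_as_slices(x, sizes, axis=0):
    # starts are the running sum of the split sizes
    outs, start = [], 0
    for n in sizes:
        idx = [slice(None)] * x.ndim
        idx[axis] = slice(start, start + n)
        outs.append(x[tuple(idx)])
        start += n
    if start != x.shape[axis]:
        raise ValueError("split sizes must sum to the axis length")
    return outs

x = np.zeros((300, 15))
parts = split_as_slices(x, [75, 75, 75, 75], axis=0)
assert [p.shape for p in parts] == [(75, 15)] * 4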
TEST_CASE(split_test_default)
{
migraphx::program p;
@@ -5552,6 +5647,23 @@ TEST_CASE(split_test_default)
EXPECT(p == prog);
}
TEST_CASE(split_test_no_attribute_invalid_split)
{
EXPECT(
test::throws([&] { migraphx::parse_onnx("split_test_no_attribute_invalid_split.onnx"); }));
}
TEST_CASE(split_test_invalid_split)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("split_test_invalid_split.onnx"); }));
}
TEST_CASE(split_test_no_attribute_invalid_input_split)
{
EXPECT(test::throws(
[&] { migraphx::parse_onnx("split_test_no_attribute_invalid_input_split.onnx"); }));
}
TEST_CASE(sqrt_test)
{
migraphx::program p;
[Binary ONNX test files (protobuf contents not shown): split_test_invalid_split.onnx, split_test_no_attribute.onnx, split_test_no_attribute_invalid_input_split.onnx, split_test_no_attribute_invalid_split.onnx]
@@ -81,6 +81,14 @@ void throws_shape(const migraphx::shape&, Ts...)
"An expected shape should not be passed to throws_shape function");
}
TEST_CASE(binary_dyn_static_error)
{
migraphx::shape a_shape{migraphx::shape::float_type, {1, 4, 4}};
std::vector<migraphx::shape::dynamic_dimension> b{{1, 1, 0}, {4, 4, 4}, {4, 4, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
throws_shape(migraphx::make_op("add"), a_shape, b_shape);
}
TEST_CASE(broadcast)
{
{
@@ -118,6 +126,69 @@ TEST_CASE(broadcast)
}
}
TEST_CASE(broadcast_axis_out_of_range_error)
{
std::vector<std::size_t> lens{1, 1};
migraphx::shape input{migraphx::shape::float_type, {1}, {0}};
throws_shape(migraphx::make_op("broadcast", {{"axis", 4}, {"out_lens", lens}}), input);
}
TEST_CASE(broadcast_2in_static_static)
{
migraphx::shape a_input{migraphx::shape::float_type, {4}, {1}};
migraphx::shape b_input{migraphx::shape::float_type, {4, 4}, {4, 1}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 4}, {1, 0}},
migraphx::make_op("broadcast", {{"axis", 0}}),
a_input,
b_input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 4}, {0, 1}},
migraphx::make_op("broadcast", {{"axis", 1}}),
a_input,
b_input);
throws_shape(migraphx::make_op("broadcast", {{"axis", 2}}), a_input, b_input);
}
TEST_CASE(broadcast_2in_not_matching_error)
{
migraphx::shape a_input{migraphx::shape::float_type, {4}, {1}};
migraphx::shape b_input{migraphx::shape::float_type, {2, 2}, {2, 1}};
throws_shape(migraphx::make_op("broadcast", {{"axis", 1}}), a_input, b_input);
}
TEST_CASE(broadcast_2in_dynamic_s0_error1)
{
migraphx::shape a_input{migraphx::shape::float_type, {4, 2}, {2, 1}};
migraphx::shape b_input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {2, 2, 0}}};
throws_shape(migraphx::make_op("broadcast", {{"axis", 0}}), b_input, a_input);
}
TEST_CASE(broadcast_2in_dynamic_s0_error2)
{
std::vector<migraphx::shape::dynamic_dimension> dd{{4, 4, 0}};
migraphx::shape a_input{migraphx::shape::float_type, dd};
migraphx::shape b_input{migraphx::shape::float_type, {4, 4}, {4, 1}};
throws_shape(migraphx::make_op("broadcast", {{"axis", 0}}), a_input, b_input);
}
TEST_CASE(broadcast_2in_static_dyn)
{
migraphx::shape a_input{migraphx::shape::float_type, {4}, {1}};
migraphx::shape b_input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {2, 2, 0}}};
throws_shape(migraphx::make_op("broadcast", {{"axis", 0}}), a_input, b_input);
expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {2, 2, 0}}},
migraphx::make_op("broadcast", {{"axis", 1}}),
a_input,
b_input);
throws_shape(migraphx::make_op("broadcast", {{"axis", 2}}), a_input, b_input);
}
TEST_CASE(broadcast_2in_dyn_s0_ndim_greater_than_1_error)
{
migraphx::shape a_input{migraphx::shape::float_type, {4, 2}};
migraphx::shape b_input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {2, 2, 0}}};
throws_shape(migraphx::make_op("broadcast", {{"axis", 0}}), a_input, b_input);
}
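
These cases pin down the two-input broadcast rule: the first input's dimensions are placed starting at axis, every other output dimension is broadcast with stride 0, and an axis past the end is an error. A small Python sketch of that stride rule, matching the strides the tests above expect (hypothetical helper, not MIGraphX code):

def broadcast_strides(axis, in_lens, in_strides, out_lens):
    # place the input dims at `axis`; all other output dims broadcast (stride 0)
    if axis + len(in_lens) > len(out_lens):
        raise ValueError("axis out of range")
    out = [0] * len(out_lens)
    for i, (n, st) in enumerate(zip(in_lens, in_strides)):
        if n != out_lens[axis + i]:
            raise ValueError("dimensions do not match at axis")
        out[axis + i] = 0 if n == 1 else st
    return out

# matches broadcast_2in_static_static above: {4} into {4, 4}
assert broadcast_strides(0, [4], [1], [4, 4]) == [1, 0]
assert broadcast_strides(1, [4], [1], [4, 4]) == [0, 1]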
TEST_CASE(convolution_shape)
{
migraphx::shape output{migraphx::shape::float_type, {4, 4, 1, 1}};
@@ -1114,6 +1185,213 @@ TEST_CASE(multibroadcast)
}
}
TEST_CASE(multibroadcast_2in_static_dyn0)
{
migraphx::shape a_shape{migraphx::shape::float_type, {4, 4}};
std::vector<migraphx::shape::dynamic_dimension> b{{1, 4, 0}, {4, 4, 4}, {4, 4, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {4, 4, 0}}},
migraphx::make_op("multibroadcast"),
a_shape,
b_shape);
expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {4, 4, 0}}},
migraphx::make_op("multibroadcast"),
b_shape,
a_shape);
}
TEST_CASE(multibroadcast_2in_static_dyn1)
{
migraphx::shape a_shape{migraphx::shape::float_type, {1, 6}};
std::vector<migraphx::shape::dynamic_dimension> b{{8, 8, 0}, {6, 6, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{8, 8, 0}, {6, 6, 0}}},
migraphx::make_op("multibroadcast"),
a_shape,
b_shape);
expect_shape(migraphx::shape{migraphx::shape::float_type, {{8, 8, 0}, {6, 6, 0}}},
migraphx::make_op("multibroadcast"),
b_shape,
a_shape);
}
TEST_CASE(multibroadcast_2in_static_dyn2)
{
migraphx::shape a_shape{migraphx::shape::float_type, {1, 6}};
std::vector<migraphx::shape::dynamic_dimension> b{{8, 8, 0}, {6, 6, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{8, 8, 0}, {6, 6, 0}}},
migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(b)}}),
a_shape,
b_shape);
expect_shape(migraphx::shape{migraphx::shape::float_type, {{8, 8, 0}, {6, 6, 0}}},
migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(b)}}),
b_shape,
a_shape);
}
TEST_CASE(multibroadcast_2in_static_dyn_error0)
{
// doesn't match on first dimension
migraphx::shape a_shape{migraphx::shape::float_type, {3, 6}};
std::vector<migraphx::shape::dynamic_dimension> b{{1, 3, 0}, {6, 6, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}
TEST_CASE(multibroadcast_2in_static_dyn_error1)
{
// doesn't match on first dimension
migraphx::shape a_shape{migraphx::shape::float_type, {3, 6}};
std::vector<migraphx::shape::dynamic_dimension> b{{1, 4, 0}, {6, 6, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}
TEST_CASE(multibroadcast_2in_static_dyn_error2)
{
// doesn't match on first dimension
migraphx::shape a_shape{migraphx::shape::float_type, {3, 6}};
std::vector<migraphx::shape::dynamic_dimension> b{{1, 2, 0}, {6, 6, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}
TEST_CASE(multibroadcast_2in_dyn_dyn0)
{
std::vector<migraphx::shape::dynamic_dimension> a{{1, 4, 0}, {2, 4, 2}, {2, 4, 0}};
migraphx::shape a_shape{migraphx::shape::float_type, a};
std::vector<migraphx::shape::dynamic_dimension> b{{2, 4, 2}, {2, 4, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 4, 2}, {2, 4, 0}}},
migraphx::make_op("multibroadcast"),
a_shape,
b_shape);
expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 4, 2}, {2, 4, 0}}},
migraphx::make_op("multibroadcast"),
b_shape,
a_shape);
}
TEST_CASE(multibroadcast_2in_dyn_dyn1)
{
std::vector<migraphx::shape::dynamic_dimension> a{{1, 4, 0}, {2, 4, 2}, {2, 4, 0}};
migraphx::shape a_shape{migraphx::shape::float_type, a};
std::vector<migraphx::shape::dynamic_dimension> b{{2, 4, 2}, {2, 4, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 4, 2}, {2, 4, 0}}},
migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(a)}}),
a_shape,
b_shape);
expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 4, 2}, {2, 4, 0}}},
migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(a)}}),
b_shape,
a_shape);
}
TEST_CASE(multibroadcast_2in_dyn_dyn_error0)
{
// max doesn't match on second dimension of a
std::vector<migraphx::shape::dynamic_dimension> a{{1, 4, 0}, {2, 4, 2}, {2, 4, 0}};
migraphx::shape a_shape{migraphx::shape::float_type, a};
std::vector<migraphx::shape::dynamic_dimension> b{{2, 5, 2}, {2, 4, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}
TEST_CASE(multibroadcast_2in_dyn_dyn_error1)
{
// opt doesn't match on second dimension of a
std::vector<migraphx::shape::dynamic_dimension> a{{1, 4, 0}, {2, 4, 2}, {2, 4, 0}};
migraphx::shape a_shape{migraphx::shape::float_type, a};
std::vector<migraphx::shape::dynamic_dimension> b{{2, 4, 3}, {2, 4, 0}};
migraphx::shape b_shape{migraphx::shape::float_type, b};
throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}
TEST_CASE(multibroadcast_2in_static_static0)
{
migraphx::shape a_shape{migraphx::shape::float_type, {3, 6}};
migraphx::shape b_shape{migraphx::shape::float_type, {3, 6}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 6}},
migraphx::make_op("multibroadcast"),
a_shape,
b_shape);
expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 6}},
migraphx::make_op("multibroadcast"),
b_shape,
a_shape);
}
TEST_CASE(multibroadcast_2in_static_static1)
{
migraphx::shape a_shape{migraphx::shape::float_type, {1, 8}};
migraphx::shape b_shape{migraphx::shape::float_type, {4, 8}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 8}, {0, 1}},
migraphx::make_op("multibroadcast"),
a_shape,
b_shape);
expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 8}, {8, 1}},
migraphx::make_op("multibroadcast"),
b_shape,
a_shape);
}
TEST_CASE(multibroadcast_2in_static_static2)
{
migraphx::shape a_shape{migraphx::shape::float_type, {8}};
migraphx::shape b_shape{migraphx::shape::float_type, {4, 4, 1}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 4, 8}, {0, 0, 1}},
migraphx::make_op("multibroadcast"),
a_shape,
b_shape);
expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 4, 8}, {4, 1, 0}},
migraphx::make_op("multibroadcast"),
b_shape,
a_shape);
}
TEST_CASE(multibroadcast_2in_static_static3)
{
migraphx::shape a_shape{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape b_shape{migraphx::shape::float_type, {4, 1}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 4, 4}, {16, 4, 1}},
migraphx::make_op("multibroadcast"),
a_shape,
b_shape);
expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 4, 4}, {0, 1, 0}},
migraphx::make_op("multibroadcast"),
b_shape,
a_shape);
}
TEST_CASE(multibroadcast_2in_static_static4)
{
migraphx::shape a_shape{migraphx::shape::float_type, {3, 1, 4}};
migraphx::shape b_shape{migraphx::shape::float_type, {4, 1}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 4, 4}, {4, 0, 1}},
migraphx::make_op("multibroadcast"),
a_shape,
b_shape);
expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 4, 4}, {0, 1, 0}},
migraphx::make_op("multibroadcast"),
b_shape,
a_shape);
}
TEST_CASE(multibroadcast_2in_static_static_error0)
{
migraphx::shape a_shape{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape b_shape{migraphx::shape::float_type, {4, 3}};
throws_shape(migraphx::make_op("multibroadcast"), a_shape, b_shape);
throws_shape(migraphx::make_op("multibroadcast"), b_shape, a_shape);
}
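
The static-static cases follow NumPy-style broadcasting: shapes align from the right, a size-1 dimension (or one missing on the left) broadcasts with stride 0, and mismatched non-1 sizes are an error. A Python sketch of the stride computation these tests exercise (hypothetical helper, not MIGraphX code):

def multibroadcast_strides(lens, strides, out_lens):
    # align from the right; size-1 and missing dims broadcast with stride 0
    offset = len(out_lens) - len(lens)
    result = []
    for i, n in enumerate(out_lens):
        j = i - offset
        if j < 0 or lens[j] == 1:
            result.append(0)
        elif lens[j] == n:
            result.append(strides[j])
        else:
            raise ValueError("incompatible dimensions")
    return result

# multibroadcast_2in_static_static2 above: {8} and {4, 4, 1} -> {4, 4, 8}
assert multibroadcast_strides([8], [1], [4, 4, 8]) == [0, 0, 1]
assert multibroadcast_strides([4, 4, 1], [4, 1, 1], [4, 4, 8]) == [4, 1, 0]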
TEST_CASE(multinomial)
{
migraphx::shape s{migraphx::shape::float_type, {2, 5}};
@@ -38,6 +38,27 @@ TEST_CASE(test_shape_default)
EXPECT(s.elements() == 0);
EXPECT(s.bytes() == 0);
}
TEST_CASE(test_dyn_4arg_constructor)
{
migraphx::shape s{migraphx::shape::float_type, {1, 4, 4}, {4, 4, 4}, {0, 0, 0}};
std::vector<migraphx::shape::dynamic_dimension> expected_dyn_dims = {
{1, 4, 0}, {4, 4, 0}, {4, 4, 0}};
EXPECT(s.dynamic());
EXPECT(s.dyn_dims() == expected_dyn_dims);
}
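
The four-argument constructor zips the per-dimension mins, maxes, and optimals into {min, max, optimal} dynamic_dimension triples. Restating the test's expectation as a small Python check (illustrative only):

mins, maxes, opts = [1, 4, 4], [4, 4, 4], [0, 0, 0]
assert list(zip(mins, maxes, opts)) == [(1, 4, 0), (4, 4, 0), (4, 4, 0)]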
TEST_CASE(test_shape_assign)
{
migraphx::shape s1{migraphx::shape::float_type, {100, 32, 8, 8}};
@@ -185,6 +206,31 @@ TEST_CASE(test_shape_packed)
EXPECT(not s.broadcasted());
}
TEST_CASE(test_shape_ndim_static)
{
migraphx::shape s0{migraphx::shape::float_type, {2, 2}};
EXPECT(s0.ndim() == 2);
migraphx::shape s1{migraphx::shape::float_type, {1, 2, 4, 4}};
EXPECT(s1.ndim() == 4);
migraphx::shape s2{migraphx::shape::float_type, {2, 4, 4, 1, 3}};
EXPECT(s2.ndim() == 5);
}
TEST_CASE(test_shape_ndim_dyn)
{
migraphx::shape s0{migraphx::shape::float_type, {{2, 2, 0}, {2, 2, 0}}};
EXPECT(s0.ndim() == 2);
migraphx::shape s1{migraphx::shape::float_type, {{1, 1, 0}, {2, 4, 0}, {2, 4, 0}, {2, 4, 0}}};
EXPECT(s1.ndim() == 4);
migraphx::shape s2{migraphx::shape::float_type,
{{1, 1, 0}, {2, 4, 0}, {2, 4, 0}, {1, 1, 1}, {3, 3, 0}}};
EXPECT(s2.ndim() == 5);
}
TEST_CASE(test_shape_non_packed_single_dim)
{
migraphx::shape s{migraphx::shape::float_type, {1, 64, 35, 35}, {156800, 1225, 35, 1}};
@@ -212,6 +258,21 @@ TEST_CASE(test_shape_transposed2)
EXPECT(not s.broadcasted());
}
TEST_CASE(test_shape_static_to_dynamic)
{
migraphx::shape s0{migraphx::shape::float_type, {1, 2, 4, 4}};
migraphx::shape s1 = s0.to_dynamic();
migraphx::shape s2{migraphx::shape::float_type, {{1, 1, 0}, {2, 2, 0}, {4, 4, 0}, {4, 4, 0}}};
EXPECT(s1 == s2);
}
TEST_CASE(test_shape_dyn_to_dynamic)
{
migraphx::shape s0{migraphx::shape::float_type, {{1, 1, 0}, {2, 4, 0}, {2, 4, 0}, {2, 4, 0}}};
migraphx::shape s1 = s0.to_dynamic();
EXPECT(s0 == s1);
}
TEST_CASE(test_shape_overlap)
{
migraphx::shape s{migraphx::shape::float_type, {2, 2, 3}, {6, 3, 2}};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_concat_broadcast_add : verify_program<test_concat_broadcast_add>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s0{migraphx::shape::float_type, {1, 2, 4}};
migraphx::shape s1{migraphx::shape::float_type, {1, 6, 4}};
migraphx::shape s2{migraphx::shape::float_type, {6, 1}};
auto x = mm->add_parameter("x", s0);
auto y = mm->add_parameter("y", s0);
auto z = mm->add_parameter("z", s0);
auto concat = mm->add_instruction(migraphx::make_op("concat", {{"axis", 1}}), x, y, z);
auto b = mm->add_literal(migraphx::generate_literal(s2, 15));
auto bb =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s1.lens()}}), b);
mm->add_instruction(migraphx::make_op("add"), concat, bb);
return p;
}
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_slice_concat_add : verify_program<test_slice_concat_add>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s0{migraphx::shape::float_type, {1, 24, 2, 2}};
migraphx::shape s1{migraphx::shape::float_type, {1, 8, 2, 2}};
auto x = mm->add_parameter("x", s0);
auto y = mm->add_parameter("y", s1);
auto z = mm->add_parameter("z", s0);
auto slice = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {8}}}), x);
auto concat = mm->add_instruction(migraphx::make_op("concat", {{"axis", 1}}), slice, y, y);
mm->add_instruction(migraphx::make_op("add"), concat, z);
return p;
}
};
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import argparse

import onnx
from onnx import version_converter


def parse_args():
    parser = argparse.ArgumentParser(
        description='MIGraphX ONNX model conversion. Use to convert the '
        "opset of the input model to the one MIGraphX supports")
    req_args = parser.add_argument_group(title='required arguments')
    req_args.add_argument('--model',
                          type=str,
                          required=True,
                          help='path to onnx file')
    req_args.add_argument('--output',
                          type=str,
                          required=True,
                          help='path to output onnx file')
    req_args.add_argument('--opset',
                          type=int,
                          required=True,
                          help='The output opset')
    req_args.add_argument('--infer_shapes',
                          action='store_true',
                          help='Infer shapes for output model')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='show verbose information (for debugging)')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    model_path = args.model
    out_model_path = args.output
    target_opset = args.opset
    verbose = args.verbose
    infer_shapes = args.infer_shapes

    original_model = onnx.load(model_path)
    if verbose:
        print(f"The model before conversion:\n{original_model}")

    # A full list of supported adapters can be found here:
    # https://github.com/onnx/onnx/blob/main/onnx/version_converter.py#L21
    # Apply the version conversion on the original model
    converted_model = version_converter.convert_version(
        original_model, target_opset)

    if infer_shapes:
        converted_model = onnx.shape_inference.infer_shapes(converted_model)

    if verbose:
        print(f"The model after conversion:\n{converted_model}")

    # Save the ONNX model
    onnx.save(converted_model, out_model_path)


if __name__ == '__main__':
    main()
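
For reference, a typical invocation (script file name hypothetical; the flags are the ones defined in parse_args above):

python convert_onnx_opset.py --model model.onnx --output model_opset13.onnx --opset 13 --infer_shapes --verbose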