Commit 224b7aa0 authored by charlie

Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_model_test

parents c4b1102e 4420ccbd
@@ -29,19 +29,14 @@
 #include <migraphx/instruction_ref.hpp>
 #include <migraphx/stringutils.hpp>
-#include <migraphx/op/convolution.hpp>
-#include <migraphx/op/deconvolution.hpp>
 #include <migraphx/op/dot.hpp>
 #include <migraphx/op/if_op.hpp>
 #include <migraphx/op/reshape.hpp>
-#include <migraphx/op/quant_convolution.hpp>
 #include <migraphx/op/quant_dot.hpp>
 #include <migraphx/gpu/context.hpp>
-#include <migraphx/gpu/convolution.hpp>
 #include <migraphx/gpu/device_name.hpp>
 #include <migraphx/gpu/gemm.hpp>
-#include <migraphx/gpu/int8_conv_pack.hpp>
 #include <migraphx/gpu/miopen.hpp>
 #include <migraphx/gpu/rocblas.hpp>
 #include <migraphx/gpu/compiler.hpp>
@@ -109,9 +104,9 @@ struct miopen_apply
         add_extend_op("scatter_none");
         add_extend_op("topk");
-        add_convolution_op<op::convolution>("convolution");
-        add_convolution_op<op::deconvolution>("deconvolution");
-        add_convolution_op<op::quant_convolution>("quant_convolution");
+        add_convolution_op("convolution");
+        add_convolution_op("deconvolution");
+        add_convolution_op("quant_convolution");
         add_gemm_op<op::dot>("dot");
         add_gemm_op<op::quant_dot>("quant_dot");
         add_if_op();
@@ -238,34 +233,19 @@ struct miopen_apply
         });
     }

-    template <typename Op>
     void add_convolution_op(const std::string& name)
     {
         apply_map.emplace(name, [=](instruction_ref ins) {
-            operation conv =
-                miopen_convolution<Op>{any_cast<Op>(ins->get_operator()), int8_x4_format};
-            migraphx::context ctx = get_context();
-            size_t ws_bytes       = 0;
-            auto compile_conv_with_format = [&](bool format) {
-                conv     = miopen_convolution<Op>{any_cast<Op>(ins->get_operator()), format};
-                auto ws  = conv.compile(ctx, ins->get_shape(), to_shapes(ins->inputs()));
-                ws_bytes = ws.get("workspace", 0);
-            };
-            try
-            { // for the regular convolution and deconvolution, this try would always succeed
-                compile_conv_with_format(int8_x4_format);
-            }
-            catch(migraphx::exception&)
-            {
-                // In case no solver supports the default format, retry using the other format.
-                compile_conv_with_format(not int8_x4_format);
-            }
-            auto args      = ins->inputs();
+            operation conv = make_op(
+                "gpu::" + name,
+                {{"op", ins->get_operator().to_value()}, {"int8_x4_format", int8_x4_format}});
             auto output = insert_allocation(ins, ins->get_shape());
-            auto workspace = insert_allocation(ins, shape{shape::int8_type, {ws_bytes}});
-            return mod->replace_instruction(ins, conv, args[0], args[1], workspace, output);
+            return mod->replace_instruction(ins,
+                                            make_op("gpu::miopen_op", {{"op", to_value(conv)}}),
+                                            ins->inputs().at(0),
+                                            ins->inputs().at(1),
+                                            output);
         });
     }
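Note on the hunk above: lowering no longer instantiates and compiles a miopen_convolution in place (including the removed try/catch fallback between int8x4 formats). It now records the original operator inside a generic gpu::miopen_op, and the new compile_miopen pass added to target.cpp further down performs the actual MIOpen compilation and workspace handling, presumably including the format fallback. The pattern relies on operations round-tripping through migraphx::value; a minimal sketch of that existing API:

    // An operation can be serialized to a value and rebuilt by name, which is
    // what lets lowering record the convolution now and compile it in a later pass.
    migraphx::operation op   = migraphx::make_op("convolution");
    migraphx::value v        = op.to_value();
    migraphx::operation same = migraphx::make_op(op.name(), v);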
......
@@ -101,7 +101,10 @@ struct mlir_handle
     mlir_handle(T p) : handle(ptr{p}) {}

-    T get() const { return handle.get().get(); }
+    T get() const
+    {
+        return handle.get().get(); // NOLINT(readability-redundant-smartptr-get)
+    }

     T release() { return handle.release().get(); }
......
@@ -35,6 +35,7 @@
 #include <migraphx/fuse_pointwise.hpp>
 #include <migraphx/inline_module.hpp>
 #include <migraphx/insert_pad.hpp>
+#include <migraphx/layout_nhwc.hpp>
 #include <migraphx/memory_coloring.hpp>
 #include <migraphx/normalize_ops.hpp>
 #include <migraphx/preallocate_param.hpp>
@@ -50,6 +51,7 @@
 #include <migraphx/simplify_qdq.hpp>
 #include <migraphx/simplify_reshapes.hpp>
 #include <migraphx/gpu/allocation_model.hpp>
+#include <migraphx/gpu/compile_miopen.hpp>
 #include <migraphx/gpu/compile_ops.hpp>
 #include <migraphx/gpu/concat_gpu_opt.hpp>
 #include <migraphx/gpu/context.hpp>
@@ -70,6 +72,7 @@ namespace gpu {
 MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_SCHEDULE_PASS)
 MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_POINTWISE_FUSION)
+MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_ENABLE_NHWC)

 struct id_pass
 {
@@ -120,6 +123,9 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         simplify_algebra{},
         simplify_reshapes{},
+        enable_pass(enabled(MIGRAPHX_ENABLE_NHWC{}), layout_nhwc{}),
+        dead_code_elimination{},
+        simplify_reshapes{},
         simplify_algebra{},
         prefuse_ops{},
         dead_code_elimination{},
@@ -136,6 +142,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         eliminate_concat{concat_gpu_optimization{}},
         dead_code_elimination{},
+        compile_miopen{&gctx},
+        dead_code_elimination{},
         pack_int8_args{},
         dead_code_elimination{},
         fuse_ops{&ctx, options.fast_math},
......
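Two things are wired in here: the NHWC layout rewrite is gated behind the MIGRAPHX_ENABLE_NHWC environment variable, and compile_miopen now runs as a standalone pass between lowering and int8 packing, matching the deferred-compilation change in lowering.cpp above. A plausible reading of the enable_pass helper, inferred from the id_pass struct declared next to the environment variable (an assumption, not the verified implementation):

    // Assumed sketch: yield the real pass when the flag is set, otherwise a
    // do-nothing pass, so the pass list can be built unconditionally.
    migraphx::pass enable_pass(bool enabled, migraphx::pass p)
    {
        if(enabled)
            return p;
        return id_pass{};
    }

With that gate, running with MIGRAPHX_ENABLE_NHWC=1 set in the environment turns layout_nhwc on for that run.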
@@ -272,6 +272,35 @@ TEST_CASE(contiguous_input)
     EXPECT(p1 == p2);
 }
+TEST_CASE(contiguous_boolean_input)
+{
+    migraphx::shape s{migraphx::shape::bool_type, {2, 3}};
+    migraphx::shape s_lit{migraphx::shape::bool_type, {1}, {0}};
+    migraphx::program p1;
+    {
+        auto* mm = p1.get_main_module();
+        auto x   = mm->add_parameter("x", s);
+        auto one = mm->add_literal(migraphx::literal(s_lit, {1.0}));
+        auto yb  = mm->add_instruction(
+            migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), one);
+        auto y    = mm->add_instruction(migraphx::make_op("contiguous"), yb);
+        auto xor1 = mm->add_instruction(migraphx::make_op("logical_xor"), x, y);
+        mm->add_return({xor1});
+    }
+    run_pass(p1);
+
+    migraphx::program p2;
+    {
+        auto* mm  = p2.get_main_module();
+        auto x    = mm->add_parameter("x", s);
+        auto xor1 = add_pointwise(p2, "main:pointwise0", {x}, [=](auto* pm, const auto& inputs) {
+            auto y = pm->add_literal(migraphx::literal(s_lit, {1}));
+            return pm->add_instruction(migraphx::make_op("logical_xor"), inputs[0], y);
+        });
+        mm->add_return({xor1});
+    }
+    EXPECT(p1 == p2);
+}
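The expected program p2 spells out the intent of the new case: after fusion, the multibroadcast/contiguous pair around the boolean literal is gone and the scalar literal is folded directly into the fused pointwise module. (The closing EXPECT(p1 == p2) comparison is restored above; the rendered diff dropped it, but p2 is clearly built to be compared.)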
 TEST_CASE(all_scalar_input)
 {
     migraphx::shape s{migraphx::shape::float_type};
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/layout_nhwc.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <basic_ops.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
void run_pass(migraphx::module& m)
{
    migraphx::run_passes(m, {migraphx::layout_nhwc{}, migraphx::dead_code_elimination{}});
}

migraphx::operation layout(std::vector<int64_t> permutation = {0, 1, 2, 3})
{
    return migraphx::make_op("layout", {{"permutation", permutation}});
}

migraphx::instruction_ref add_layout_nhwc(migraphx::module& m, migraphx::instruction_ref ins)
{
    return m.add_instruction(layout({0, 2, 3, 1}), ins);
}
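A note on these helpers: permutation {0, 2, 3, 1} reorders NCHW into NHWC, and the default layout() restores NCHW. The layout op keeps the logical lens and permutes only the strides, so for the {1, 8, 16, 16} parameter used in these tests the NHWC form would be (illustrative values computed by hand, not taken from the test):

    // lens stay {1, 8, 16, 16} (N, C, H, W); strides become channels-last:
    // C innermost (1), then W (8), H (128), N (2048).
    migraphx::shape nhwc{migraphx::shape::float_type, {1, 8, 16, 16}, {2048, 1, 128, 8}};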
TEST_CASE(conv_relu)
{
    migraphx::module m1;
    {
        auto x = m1.add_parameter("x", {migraphx::shape::float_type, {1, 8, 16, 16}});
        auto w = m1.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {16, 8, 3, 3}}));
        auto conv = m1.add_instruction(
            migraphx::make_op("convolution",
                              {{"padding", {1, 1}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
            x,
            w);
        m1.add_instruction(migraphx::make_op("relu"), conv);
    }
    run_pass(m1);

    migraphx::module m2;
    {
        auto x = add_layout_nhwc(
            m2, m2.add_parameter("x", {migraphx::shape::float_type, {1, 8, 16, 16}}));
        auto w = add_layout_nhwc(m2,
                                 m2.add_literal(migraphx::generate_literal(
                                     {migraphx::shape::float_type, {16, 8, 3, 3}})));
        auto conv = m2.add_instruction(
            migraphx::make_op("convolution",
                              {{"padding", {1, 1}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
            x,
            w);
        auto conv_layout = m2.add_instruction(layout(), conv);
        m2.add_instruction(migraphx::make_op("relu"), conv_layout);
    }
    EXPECT(m1.sort() == m2.sort());
}
TEST_CASE(conv_add)
{
    migraphx::module m1;
    {
        auto x = m1.add_parameter("x", {migraphx::shape::float_type, {1, 8, 16, 16}});
        auto w = m1.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {16, 8, 3, 3}}));
        auto y = m1.add_literal(migraphx::generate_literal({migraphx::shape::float_type, {16}}));
        auto conv = m1.add_instruction(
            migraphx::make_op("convolution",
                              {{"padding", {1, 1}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
            x,
            w);
        auto b = m1.add_instruction(
            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", conv->get_shape().lens()}}),
            y);
        m1.add_instruction(migraphx::make_op("add"), conv, b);
    }
    run_pass(m1);

    migraphx::module m2;
    {
        auto x = add_layout_nhwc(
            m2, m2.add_parameter("x", {migraphx::shape::float_type, {1, 8, 16, 16}}));
        auto w = add_layout_nhwc(m2,
                                 m2.add_literal(migraphx::generate_literal(
                                     {migraphx::shape::float_type, {16, 8, 3, 3}})));
        auto y = m2.add_literal(migraphx::generate_literal({migraphx::shape::float_type, {16}}));
        auto conv = m2.add_instruction(
            migraphx::make_op("convolution",
                              {{"padding", {1, 1}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
            x,
            w);
        auto conv_layout = m2.add_instruction(layout(), conv);
        auto b           = m2.add_instruction(
            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", conv->get_shape().lens()}}),
            y);
        m2.add_instruction(migraphx::make_op("add"), conv_layout, b);
    }
    EXPECT(m1.sort() == m2.sort());
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -454,6 +454,23 @@ def binary_dyn_brcst_add_test():
     return ([node], [arg0, arg1], [arg_out])
+@onnx_test
+def binary_dyn_brcst_attr_error_test():
+    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [4, 5])
+    arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT,
+                                         [None, 3, 4, 5])
+    arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
+                                            [None, 3, 4, 5])
+
+    node = onnx.helper.make_node('Add',
+                                 inputs=['0', '1'],
+                                 outputs=['out'],
+                                 broadcast=1,
+                                 axis=1)
+
+    return ([node], [arg0, arg1], [arg_out])
 @onnx_test
 def binary_dyn_brcst_mul_test():
     arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT,
@@ -5738,6 +5755,92 @@ def split_test_default():
     return ([node], [x], [y1, y2])
+@onnx_test
+def split_test_no_attribute():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [300, 15])
+    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [75, 15])
+    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [75, 15])
+    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [75, 15])
+    y4 = helper.make_tensor_value_info('y4', TensorProto.FLOAT, [75, 15])
+    split = np.ones(4) * 75
+    split_tensor = helper.make_tensor(name="split",
+                                      data_type=TensorProto.INT64,
+                                      dims=split.shape,
+                                      vals=split.astype(np.int64))
+    const_node = helper.make_node("Constant",
+                                  inputs=[],
+                                  outputs=['split'],
+                                  value=split_tensor)
+    node = onnx.helper.make_node(
+        'Split',
+        inputs=['x', 'split'],
+        outputs=['y1', 'y2', 'y3', 'y4'],
+    )
+    return ([const_node, node], [x], [y1, y2, y3, y4])
+@onnx_test
+def split_test_no_attribute_invalid_split():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [300, 15])
+    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [75, 15])
+    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [75, 15])
+    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [75, 15])
+    y4 = helper.make_tensor_value_info('y4', TensorProto.FLOAT, [75, 15])
+    split = np.ones(4)
+    split_tensor = helper.make_tensor(name="split",
+                                      data_type=TensorProto.INT64,
+                                      dims=split.shape,
+                                      vals=split.astype(np.int64))
+    const_node = helper.make_node("Constant",
+                                  inputs=[],
+                                  outputs=['split'],
+                                  value=split_tensor)
+    node = onnx.helper.make_node(
+        'Split',
+        inputs=['x', 'split'],
+        outputs=['y1', 'y2', 'y3', 'y4'],
+    )
+    return ([const_node, node], [x], [y1, y2, y3, y4])
+@onnx_test
+def split_test_invalid_split():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
+    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [10, 7])
+    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [10, 4])
+    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [10, 4])
+
+    node = onnx.helper.make_node('Split',
+                                 inputs=['x'],
+                                 outputs=['y1', 'y2', 'y3'],
+                                 axis=1,
+                                 split=[1, 1, 1])
+
+    return ([node], [x], [y1, y2, y3])
+@onnx_test
+def split_test_no_attribute_invalid_input_split():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
+    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [10, 7])
+    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [10, 4])
+    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [10, 4])
+
+    node = onnx.helper.make_node('Split',
+                                 inputs=['x'],
+                                 outputs=['y1', 'y2', 'y3'],
+                                 axis=1,
+                                 split=[])
+
+    return ([node], [x], [y1, y2, y3])
 @onnx_test
 def sqrt_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])

@@ -6174,6 +6277,21 @@ def transpose_test():
     return ([node], [x], [y])
+@onnx_test
+def transpose_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 2, 3])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 2, 2])
+    node = onnx.helper.make_node(
+        'Transpose',
+        perm=[0, 3, 1, 2],
+        inputs=['0'],
+        outputs=['1'],
+    )
+    return ([node], [x], [y])
 @onnx_test
 def transpose_gather_test():
     x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 5, 4, 6])
......
@@ -559,6 +559,14 @@ TEST_CASE(binary_dyn_brcst_add_test)
     EXPECT(p == prog);
 }
+TEST_CASE(binary_dyn_brcst_attr_error_test)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("binary_dyn_brcst_attr_error_test.onnx", options); }));
+}
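For context: ONNX removed the broadcast and axis attributes from binary ops such as Add in opset 7 in favor of implicit numpy-style broadcasting, so the parser is expected to reject the generated model above, which still carries them.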
 TEST_CASE(binary_dyn_brcst_mul_test)
 {
     migraphx::program p;

@@ -5599,6 +5607,31 @@ TEST_CASE(split_test)
     EXPECT(p == prog);
 }
+TEST_CASE(split_test_no_attribute)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::shape si{migraphx::shape::int64_type, {4}, {1}};
+    std::vector<int> ind = {75, 75, 75, 75};
+
+    auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {300, 15}});
+    mm->add_literal(migraphx::literal(si, ind));
+    auto r1 = mm->add_instruction(
+        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {75}}}), input);
+    auto r2 = mm->add_instruction(
+        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {75}}, {"ends", {150}}}), input);
+    auto r3 = mm->add_instruction(
+        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {150}}, {"ends", {225}}}), input);
+    auto r4 = mm->add_instruction(
+        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {225}}, {"ends", {300}}}), input);
+    mm->add_return({r1, r2, r3, r4});
+
+    auto prog = migraphx::parse_onnx("split_test_no_attribute.onnx");
+    EXPECT(p == prog);
+}
 TEST_CASE(split_test_default)
 {
     migraphx::program p;

@@ -5614,6 +5647,23 @@ TEST_CASE(split_test_default)
     EXPECT(p == prog);
 }
+TEST_CASE(split_test_no_attribute_invalid_split)
+{
+    EXPECT(
+        test::throws([&] { migraphx::parse_onnx("split_test_no_attribute_invalid_split.onnx"); }));
+}
+
+TEST_CASE(split_test_invalid_split)
+{
+    EXPECT(test::throws([&] { migraphx::parse_onnx("split_test_invalid_split.onnx"); }));
+}
+
+TEST_CASE(split_test_no_attribute_invalid_input_split)
+{
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("split_test_no_attribute_invalid_input_split.onnx"); }));
+}
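All three rejected models break the same invariant: the split sizes, whether supplied as an attribute or as an input tensor, must sum to the length of the axis being split. A standalone sketch of the check being exercised (hedged; the parser's actual code may differ):

    #include <cassert>
    #include <numeric>
    #include <vector>

    int main()
    {
        // x has shape {10, 15}; splitting axis 1 into {1, 1, 1} covers only 3
        // of the 15 elements, so parsing such a model must fail; an empty
        // split list fails the same check.
        std::vector<long> split = {1, 1, 1};
        long axis_len           = 15;
        assert(std::accumulate(split.begin(), split.end(), 0L) != axis_len);
        return 0;
    }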
 TEST_CASE(sqrt_test)
 {
     migraphx::program p;

@@ -5923,6 +5973,24 @@ TEST_CASE(transpose_test)
     EXPECT(p == prog);
 }
+TEST_CASE(transpose_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}});
+    std::vector<int64_t> perm{0, 3, 1, 2};
+    auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), input);
+    mm->add_return({t0});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog = migraphx::parse_onnx("transpose_dyn_test.onnx", options);
+
+    EXPECT(p == prog);
+}
 TEST_CASE(topk_attrk_test)
 {
     migraphx::program p;
......
[Binary ONNX files added for the new split tests: split_test_invalid_split.onnx, split_test_no_attribute.onnx, split_test_no_attribute_invalid_input_split.onnx, and split_test_no_attribute_invalid_split.onnx; their raw protobuf contents are not rendered here.]
@@ -81,6 +81,14 @@ void throws_shape(const migraphx::shape&, Ts...)
         "An expected shape should not be passed to throws_shape function");
 }
+TEST_CASE(binary_dyn_static_error)
+{
+    migraphx::shape a_shape{migraphx::shape::float_type, {1, 4, 4}};
+    std::vector<migraphx::shape::dynamic_dimension> b{{1, 1, 0}, {4, 4, 4}, {4, 4, 0}};
+    migraphx::shape b_shape{migraphx::shape::float_type, b};
+
+    throws_shape(migraphx::make_op("add"), a_shape, b_shape);
+}
 TEST_CASE(broadcast)
 {
     {

@@ -357,6 +365,12 @@ TEST_CASE(contiguous_shape)
     expect_shape(single, migraphx::make_op("contiguous"), single);
 }
+TEST_CASE(contiguous_dyn_shape)
+{
+    migraphx::shape s0{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 2}}};
+    expect_shape(s0, migraphx::make_op("contiguous"), s0);
+}
 TEST_CASE(contiguous_shape_scalar)
 {
     migraphx::shape output{migraphx::shape::float_type};

@@ -1207,6 +1221,21 @@ TEST_CASE(multibroadcast_2in_static_dyn1)
         a_shape);
 }
+TEST_CASE(multibroadcast_2in_static_dyn2)
+{
+    migraphx::shape a_shape{migraphx::shape::float_type, {1, 6}};
+    std::vector<migraphx::shape::dynamic_dimension> b{{8, 8, 0}, {6, 6, 0}};
+    migraphx::shape b_shape{migraphx::shape::float_type, b};
+    expect_shape(migraphx::shape{migraphx::shape::float_type, {{8, 8, 0}, {6, 6, 0}}},
+                 migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(b)}}),
+                 a_shape,
+                 b_shape);
+    expect_shape(migraphx::shape{migraphx::shape::float_type, {{8, 8, 0}, {6, 6, 0}}},
+                 migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(b)}}),
+                 b_shape,
+                 a_shape);
+}
 TEST_CASE(multibroadcast_2in_static_dyn_error0)
 {
     // doesn't match on first dimension

@@ -1253,6 +1282,22 @@ TEST_CASE(multibroadcast_2in_dyn_dyn0)
         a_shape);
 }
+TEST_CASE(multibroadcast_2in_dyn_dyn1)
+{
+    std::vector<migraphx::shape::dynamic_dimension> a{{1, 4, 0}, {2, 4, 2}, {2, 4, 0}};
+    migraphx::shape a_shape{migraphx::shape::float_type, a};
+    std::vector<migraphx::shape::dynamic_dimension> b{{2, 4, 2}, {2, 4, 0}};
+    migraphx::shape b_shape{migraphx::shape::float_type, b};
+    expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 4, 2}, {2, 4, 0}}},
+                 migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(a)}}),
+                 a_shape,
+                 b_shape);
+    expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 4, 2}, {2, 4, 0}}},
+                 migraphx::make_op("multibroadcast", {{"out_dyn_dims", migraphx::to_value(a)}}),
+                 b_shape,
+                 a_shape);
+}
 TEST_CASE(multibroadcast_2in_dyn_dyn_error0)
 {
     // max doesn't match on second dimension of a

@@ -2243,6 +2288,28 @@ TEST_CASE(transpose_shape)
     throws_shape(migraphx::make_op("transpose", {{"permutation", {1, 2}}}), input);
 }
+TEST_CASE(transpose_dyn_shape0)
+{
+    migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}}};
+    migraphx::shape output{migraphx::shape::float_type, {{2, 2, 0}, {1, 4, 0}}};
+    expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1}}}), input);
+    expect_shape(output, migraphx::make_op("transpose", {{"permutation", {1, 0}}}), input);
+}
+
+TEST_CASE(transpose_dyn_shape1)
+{
+    migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {4, 4, 0}}};
+    migraphx::shape output{migraphx::shape::float_type, {{4, 4, 0}, {4, 4, 0}, {1, 4, 0}}};
+    expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1, 2}}}), input);
+    expect_shape(output, migraphx::make_op("transpose", {{"permutation", {2, 1, 0}}}), input);
+}
+
+TEST_CASE(transpose_axes_error)
+{
+    migraphx::shape input{migraphx::shape::float_type, {2, 2}};
+    throws_shape(migraphx::make_op("transpose", {{"permutation", {1}}}), input);
+}
 TEST_CASE(step_test)
 {
     migraphx::shape s1{migraphx::shape::float_type, {1, 2, 4}};
......
[One file's diff is collapsed in this view and not shown.]
@@ -38,6 +38,27 @@ TEST_CASE(test_shape_default)
     EXPECT(s.elements() == 0);
     EXPECT(s.bytes() == 0);
 }
+TEST_CASE(test_dyn_4arg_constructor)
+{
+    migraphx::shape s{migraphx::shape::float_type, {1, 4, 4}, {4, 4, 4}, {0, 0, 0}};
+    std::vector<migraphx::shape::dynamic_dimension> expected_dyn_dims = {
+        {1, 4, 0}, {4, 4, 0}, {4, 4, 0}};
+    EXPECT(s.dynamic());
+    EXPECT(s.dyn_dims() == expected_dyn_dims);
+}
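The four-argument constructor exercised above takes the element type plus parallel vectors of minimum, maximum, and optimal sizes, one entry per dimension; each resulting dynamic_dimension then reads as {min, max, opt}, with opt = 0 meaning no preferred size. The single-dimension equivalent:

    // {1, 4, 0}: the dimension ranges from 1 to 4 with no optimal-size hint.
    migraphx::shape::dynamic_dimension dd{1, 4, 0};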
 TEST_CASE(test_shape_assign)
 {
     migraphx::shape s1{migraphx::shape::float_type, {100, 32, 8, 8}};
@@ -194,7 +215,7 @@ TEST_CASE(test_shape_ndim_static)
     EXPECT(s1.ndim() == 4);

     migraphx::shape s2{migraphx::shape::float_type, {2, 4, 4, 1, 3}};
-    EXPECT(s1.ndim() == 5);
+    EXPECT(s2.ndim() == 5);
 }
 TEST_CASE(test_shape_ndim_dyn)

@@ -207,7 +228,7 @@ TEST_CASE(test_shape_ndim_dyn)
     migraphx::shape s2{migraphx::shape::float_type,
                        {{1, 1, 0}, {2, 4, 0}, {2, 4, 0}, {1, 1, 1}, {3, 3, 0}}};
-    EXPECT(s1.ndim() == 5);
+    EXPECT(s2.ndim() == 5);
 }
 TEST_CASE(test_shape_non_packed_single_dim)
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_concat_broadcast_add : verify_program<test_concat_broadcast_add>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape s0{migraphx::shape::float_type, {1, 2, 4}};
        migraphx::shape s1{migraphx::shape::float_type, {1, 6, 4}};
        migraphx::shape s2{migraphx::shape::float_type, {6, 1}};
        auto x      = mm->add_parameter("x", s0);
        auto y      = mm->add_parameter("y", s0);
        auto z      = mm->add_parameter("z", s0);
        auto concat = mm->add_instruction(migraphx::make_op("concat", {{"axis", 1}}), x, y, z);
        auto b      = mm->add_literal(migraphx::generate_literal(s2, 15));
        auto bb     = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", s1.lens()}}), b);
        mm->add_instruction(migraphx::make_op("add"), concat, bb);
        return p;
    }
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_slice_concat_add : verify_program<test_slice_concat_add>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape s0{migraphx::shape::float_type, {1, 24, 2, 2}};
        migraphx::shape s1{migraphx::shape::float_type, {1, 8, 2, 2}};
        auto x     = mm->add_parameter("x", s0);
        auto y     = mm->add_parameter("y", s1);
        auto z     = mm->add_parameter("z", s0);
        auto slice = mm->add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {8}}}), x);
        auto concat = mm->add_instruction(migraphx::make_op("concat", {{"axis", 1}}), slice, y, y);
        mm->add_instruction(migraphx::make_op("add"), concat, z);
        return p;
    }
};
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import argparse
import onnx
from onnx import version_converter
def parse_args():
    parser = argparse.ArgumentParser(
        description=
        'MIGraphX ONNX model conversion. Use to convert the opset of the input model to MIGraphX\'s'
    )
    req_args = parser.add_argument_group(title='required arguments')
    req_args.add_argument('--model',
                          type=str,
                          required=True,
                          help='path to onnx file')
    req_args.add_argument('--output',
                          type=str,
                          required=True,
                          help='path to output onnx file')
    req_args.add_argument('--opset',
                          type=int,
                          required=True,
                          help='the output opset')
    req_args.add_argument('--infer_shapes',
                          action='store_true',
                          help='infer shapes for the output model')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='show verbose information (for debugging)')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    model_path = args.model
    out_model_path = args.output
    target_opset = args.opset
    verbose = args.verbose
    infer_shapes = args.infer_shapes

    original_model = onnx.load(model_path)
    if verbose:
        print(f"The model before conversion:\n{original_model}")

    # A full list of supported adapters can be found here:
    # https://github.com/onnx/onnx/blob/main/onnx/version_converter.py#L21
    # Apply the version conversion on the original model
    converted_model = version_converter.convert_version(
        original_model, target_opset)

    if infer_shapes:
        converted_model = onnx.shape_inference.infer_shapes(converted_model)

    if verbose:
        print(f"The model after conversion:\n{converted_model}")

    # Save the ONNX model
    onnx.save(converted_model, out_model_path)


if __name__ == '__main__':
    main()
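A typical invocation of this script (its filename is not shown on this page; convert_opset.py is assumed here for illustration):

    python convert_opset.py --model input.onnx --output converted.onnx --opset 13 --infer_shapes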