Commit 2fc6b715 authored by Paul

Merge

parents 5967d68d 118e05c7
......@@ -132,109 +132,6 @@ auto visit_quantize(T&& x, Ts&&... xs)
};
}
template <class Op>
struct ref_convolution : auto_register_op<ref_convolution<Op>>
{
ref_convolution() = default;
ref_convolution(Op pop) : op(std::move(pop)) {}
Op op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "ref::" + op.name(); }
shape compute_shape(const std::vector<shape>& inputs) const
{
return op.normalize_compute_shape(inputs);
}
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
std::vector<std::size_t> padding;
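// Auto-padding modes (same_upper/same_lower) derive the padding from the input
// and weight shapes at run time and recompute the padded output shape; otherwise
// the operator's explicit padding is used, and a dynamic output shape is resolved
// from the now-known argument shapes.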
if(op.padding_mode != op::padding_mode_t::default_)
{
auto input_lens = args[0].get_shape().lens();
auto weights_lens = args[1].get_shape().lens();
padding =
op.padding_mode == op::same_upper
? calc_dyn_auto_pad(input_lens, weights_lens, op.stride, op.dilation, true)
: calc_dyn_auto_pad(input_lens, weights_lens, op.stride, op.dilation, false);
output_shape = compute_padded_shape(
args[0].get_shape(), args[1].get_shape(), padding, op.stride, op.dilation);
}
else
{
padding = op.padding;
if(output_shape.dynamic())
{
output_shape =
op.normalize_compute_shape({args.at(0).get_shape(), args.at(1).get_shape()});
}
}
argument result{output_shape};
visit_quantize(result, args[0], args[1])([&](auto output, auto input, auto weights) {
auto in_lens = input.get_shape().lens();
auto wei_lens = weights.get_shape().lens();
auto wei_n = wei_lens[0];
auto wei_c = wei_lens[1];
std::vector<std::size_t> win_size(wei_lens.begin() + 1, wei_lens.end());
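// Each output element accumulates input*weight products over one kernel window:
// the window start along each spatial dim is output_index * stride - padding, and
// window positions that fall outside the input are skipped (implicit zero padding).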
par_for(output_shape.elements(), [&](auto i) {
auto idx_o = output_shape.multi(i);
auto w = idx_o[1];
auto n_dim = idx_o.size();
std::vector<std::ptrdiff_t> win_start;
for(std::size_t dim = 2; dim < n_dim; ++dim)
{
auto d_2 = dim - 2;
win_start.push_back(std::ptrdiff_t(idx_o[dim] * op.stride[d_2]) -
std::ptrdiff_t(padding[d_2]));
}
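// Grouped convolution: output channel w reads only the input channels of its group.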
const auto group_id = w / (wei_n / op.group);
shape win_shape{output_shape.type(), win_size};
double acc = 0.0;
shape_for_each(win_shape, [&](auto idx_win) {
auto k = idx_win[0];
const auto in_ch = group_id * wei_c + k;
std::vector<std::ptrdiff_t> idx(idx_o.begin(), idx_o.end());
idx[1] = in_ch;
std::transform(idx_win.begin() + 1,
idx_win.end(),
win_start.begin(),
idx.begin() + 2,
[](std::ptrdiff_t ii, std::ptrdiff_t jj) { return ii + jj; });
std::vector<std::ptrdiff_t> idx_wei(idx_o.size());
idx_wei[0] = w;
std::copy(idx_win.begin(), idx_win.end(), idx_wei.begin() + 1);
if(std::all_of(idx.begin() + 2, idx.end(), [&](auto ii) { return ii >= 0; }) and
std::equal(idx.begin(),
idx.end(),
in_lens.begin(),
in_lens.end(),
std::less<std::ptrdiff_t>{}))
{
acc +=
input(idx.begin(), idx.end()) * weights(idx_wei.begin(), idx_wei.end());
}
});
output[i] = acc;
});
});
return result;
}
};
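For reference, a minimal standalone sketch of the accumulation this reference kernel performs (one input image, single group, unit dilation, zero padding); the helper name naive_conv2d and the flattened CHW/KCRS layouts are illustrative assumptions, not part of the change:
#include <cstddef>
#include <vector>
// Illustrative only: plain 2D convolution with stride and zero padding,
// mirroring the window walk in ref_convolution::compute (group = 1).
std::vector<float> naive_conv2d(const std::vector<float>& in,  // [C][H][W] flattened
                                const std::vector<float>& wei, // [K][C][R][S] flattened
                                std::size_t c, std::size_t h, std::size_t w,
                                std::size_t k, std::size_t r, std::size_t s,
                                std::size_t stride, std::size_t pad)
{
    const std::size_t oh = (h + 2 * pad - r) / stride + 1;
    const std::size_t ow = (w + 2 * pad - s) / stride + 1;
    std::vector<float> out(k * oh * ow, 0.0f);
    for(std::size_t ko = 0; ko < k; ++ko)
        for(std::size_t y = 0; y < oh; ++y)
            for(std::size_t x = 0; x < ow; ++x)
            {
                double acc = 0.0;
                for(std::size_t ci = 0; ci < c; ++ci)
                    for(std::size_t ry = 0; ry < r; ++ry)
                        for(std::size_t sx = 0; sx < s; ++sx)
                        {
                            // Window start = output index * stride - padding.
                            const auto iy = std::ptrdiff_t(y * stride + ry) - std::ptrdiff_t(pad);
                            const auto ix = std::ptrdiff_t(x * stride + sx) - std::ptrdiff_t(pad);
                            if(iy < 0 or ix < 0 or iy >= std::ptrdiff_t(h) or ix >= std::ptrdiff_t(w))
                                continue; // implicit zero padding
                            acc += in[(ci * h + iy) * w + ix] *
                                   wei[((ko * c + ci) * r + ry) * s + sx];
                        }
                out[(ko * oh + y) * ow + x] = float(acc);
            }
    return out;
}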
struct ref_im2col
{
op::im2col op;
......@@ -564,11 +461,8 @@ struct ref_apply
void init()
{
apply_map["convolution"] = extend_op<ref_convolution<op::convolution>, op::convolution>();
apply_map["dot"] = extend_op<ref_gemm, op::dot>();
apply_map["quant_dot"] = extend_op<ref_quant_gemm, op::quant_dot>();
apply_map["quant_convolution"] =
extend_op<ref_convolution<op::quant_convolution>, op::quant_convolution>();
apply_map["im2col"] = extend_op<ref_im2col, op::im2col>();
apply_map["logsoftmax"] = extend_op<ref_softmax<op::logsoftmax>, op::logsoftmax>();
apply_map["lrn"] = extend_op<ref_lrn, op::lrn>();
......
......@@ -24,4 +24,6 @@
// clang-format off
#define MIGRAPHX_VERSION_MAJOR @PROJECT_VERSION_MAJOR@
#define MIGRAPHX_VERSION_MINOR @PROJECT_VERSION_MINOR@
#define MIGRAPHX_VERSION_PATCH @PROJECT_VERSION_PATCH@
#define MIGRAPHX_VERSION_TWEAK @PROJECT_VERSION_TWEAK@
// clang-format on
......@@ -106,19 +106,11 @@ function(add_test_executable TEST_NAME)
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
set_target_properties(${TEST_NAME} PROPERTIES COMPILE_FLAGS -pthread LINK_FLAGS -pthread)
endif()
separate_arguments(MIOPEN_TEST_FLAGS_ARGS UNIX_COMMAND ${MIOPEN_TEST_FLAGS})
if(MIOPEN_TEST_ALL)
set(TEST_COMMAND ${TEST_NAME} ${MIOPEN_TEST_FLOAT_ARG} --all ${MIOPEN_TEST_FLAGS_ARGS})
else()
set(TEST_COMMAND ${TEST_NAME} ${MIOPEN_TEST_FLOAT_ARG} ${MIOPEN_TEST_FLAGS_ARGS})
endif()
set(TEST_COMMAND ${TEST_NAME})
add_test_command(${TEST_NAME} ${TEST_COMMAND})
add_dependencies(tests ${TEST_NAME})
add_dependencies(check ${TEST_NAME})
target_link_libraries(${TEST_NAME} migraphx migraphx_ref migraphx_onnx)
target_link_libraries(${TEST_NAME} migraphx migraphx_onnx)
target_include_directories(${TEST_NAME} PUBLIC include)
endfunction(add_test_executable)
......@@ -171,7 +163,7 @@ foreach(ONNX_TEST ${ONNX_TESTS})
set(TEST_NAME test_${BASE_NAME})
add_executable(${TEST_NAME} ${ONNX_TEST})
rocm_clang_tidy_check(${TEST_NAME})
target_link_libraries(${TEST_NAME} migraphx_onnx migraphx_ref)
target_link_libraries(${TEST_NAME} migraphx_onnx)
target_include_directories(${TEST_NAME} PUBLIC include)
add_test(NAME ${TEST_NAME} COMMAND $<TARGET_FILE:${TEST_NAME}> WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_dependencies(tests ${TEST_NAME})
......@@ -182,7 +174,7 @@ endforeach()
set(TEST_TF_DIR ${CMAKE_CURRENT_SOURCE_DIR}/tf)
add_executable(test_tf tf/tf_test.cpp)
rocm_clang_tidy_check(test_tf)
target_link_libraries(test_tf migraphx_tf migraphx_ref)
target_link_libraries(test_tf migraphx_tf)
target_include_directories(test_tf PUBLIC include)
add_test(NAME test_tf COMMAND $<TARGET_FILE:test_tf> WORKING_DIRECTORY ${TEST_TF_DIR})
add_dependencies(tests test_tf)
......@@ -216,10 +208,7 @@ function(test_headers PREFIX)
string(MAKE_C_IDENTIFIER ${HEADER_REL} TEST_NAME)
get_filename_component(BASE_NAME ${HEADER} NAME_WE)
test_header(header_${TEST_NAME} ${PREFIX}/${BASE_NAME}.hpp)
if(MIGRAPHX_ENABLE_GPU)
target_link_libraries(header_${TEST_NAME} migraphx_gpu)
endif()
target_link_libraries(header_${TEST_NAME} migraphx_all_targets)
endforeach()
endfunction()
......@@ -229,3 +218,10 @@ test_headers(migraphx/ref ${CMAKE_SOURCE_DIR}/src/targets/ref/include/migraphx/r
if(MIGRAPHX_ENABLE_GPU)
test_headers(migraphx/gpu ${CMAKE_SOURCE_DIR}/src/targets/gpu/include/migraphx/gpu/*.hpp)
endif()
if(MIGRAPHX_ENABLE_CPU)
test_headers(migraphx/cpu ${CMAKE_SOURCE_DIR}/src/targets/cpu/include/migraphx/cpu/*.hpp)
endif()
if(MIGRAPHX_ENABLE_FPGA)
test_headers(migraphx/fpga ${CMAKE_SOURCE_DIR}/src/targets/fpga/include/migraphx/fpga/*.hpp)
endif()
......@@ -56,8 +56,10 @@ add_api_test(custom_op test_custom_op.cpp ${TEST_ONNX_DIR})
add_api_test(tf_parser test_tf_parser.cpp ${TEST_TF_DIR})
# GPU-based tests
if(MIGRAPHX_ENABLE_GPU)
list(APPEND CMAKE_PREFIX_PATH /opt/rocm)
find_package(hip)
add_api_test(gpu test_gpu.cpp ${TEST_ONNX_DIR})
target_link_libraries(test_api_gpu migraphx_gpu)
target_link_libraries(test_api_gpu hip::host)
add_api_test(custom_op_gpu test_custom_op_gpu.cpp ${TEST_ONNX_DIR})
target_link_libraries(test_api_custom_op_gpu migraphx_gpu)
target_link_libraries(test_api_custom_op_gpu hip::host)
endif()
......@@ -36,7 +36,7 @@ bool create_shapes(bool dynamic_allowed)
try
{
shape a{shape::int64_type, {3}};
shape b{shape::float_type, {{3, 6, 0}, {4, 4, 0}}};
shape b{shape::float_type, {{3, 6}, {4, 4}}};
auto op = migraphx::make_op("add");
migraphx::check_shapes{{a, b}, op, dynamic_allowed}.has(2);
return true;
......
......@@ -26,7 +26,6 @@
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/fpga/target.hpp>
#include <migraphx/target_assignments.hpp>
#include <migraphx/iterator_for.hpp>
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/fuse_reduce.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/program.hpp>
#include <basic_ops.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
#include <pointwise.hpp>
void run_pass(migraphx::program& p)
{
migraphx::run_passes(p, {migraphx::fuse_reduce{}, migraphx::dead_code_elimination{}});
}
bool all_instructions_are_local(const migraphx::module& m)
{
return std::all_of(m.begin(), m.end(), [&](const auto& ins) {
return std::all_of(ins.inputs().begin(), ins.inputs().end(), [&](auto input) {
return m.has_instruction(input);
});
});
}
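// Builds a "fused_reduce" instruction: creates a bypass submodule named `name`
// whose parameters x0, x1, ... mirror the input shapes, lets `f` populate the
// reduction body, and references that submodule from the main module.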
template <class F>
migraphx::instruction_ref add_reduce(migraphx::program& p,
const std::string& name,
std::vector<migraphx::instruction_ref> inputs,
const std::vector<int64_t>& axes,
F f)
{
auto* rm = p.create_module(name);
auto* mm = p.get_main_module();
rm->set_bypass();
std::vector<migraphx::instruction_ref> params;
std::transform(inputs.begin(), inputs.end(), std::back_inserter(params), [&](auto input) {
return rm->add_parameter(
"x" + std::to_string(params.size()),
migraphx::shape{input->get_shape().type(), input->get_shape().lens()});
});
auto r = f(rm, params, axes);
rm->add_return({r});
EXPECT(all_instructions_are_local(*rm));
return mm->add_instruction(migraphx::make_op("fused_reduce", {{"axes", axes}}), inputs, {rm});
}
inline auto single_reduce(const std::string& name)
{
return [=](auto* rm, const auto& inputs, const auto& axes) {
return rm->add_instruction(migraphx::make_op(name, {{"axes", axes}}), inputs);
};
}
TEST_CASE(single)
{
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
migraphx::program p1;
{
auto* mm = p1.get_main_module();
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto rsum1 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
auto rsum2 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), y);
mm->add_return({rsum1, rsum2});
}
run_pass(p1);
migraphx::program p2;
{
auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto rsum1 = add_reduce(p2, "main:reduce_sum0", {x}, {1}, single_reduce("reduce_sum"));
auto rsum2 = add_reduce(p2, "main:reduce_sum1", {y}, {1}, single_reduce("reduce_sum"));
mm->add_return({rsum1, rsum2});
}
EXPECT(p1 == p2);
}
TEST_CASE(pointwise_reduce)
{
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
migraphx::program p1;
{
auto* mm = p1.get_main_module();
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto add = add_pointwise(p1, "main:pointwise0", {x, y}, single_pointwise("add"));
auto rsum = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), add);
mm->add_return({rsum});
}
run_pass(p1);
migraphx::program p2;
{
auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto rsum = add_reduce(
p2,
"main:pointwise0:main:reduce_sum0",
{x, y},
{1},
[&](auto* rm, const auto& inputs, const auto& axes) {
auto add =
add_pointwise(p2, rm, "main:pointwise0", inputs, single_pointwise("add"));
return rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}), add);
});
mm->add_return({rsum});
}
EXPECT(p1 == p2);
}
TEST_CASE(reduce_pointwise)
{
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
migraphx::program p1;
{
auto* mm = p1.get_main_module();
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto rsum = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
auto rsumb = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum);
auto add = add_pointwise(p1, "main:pointwise0", {rsumb, y}, single_pointwise("add"));
mm->add_return({add});
}
run_pass(p1);
migraphx::program p2;
{
auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto add = add_reduce(
p2,
"main:reduce_sum0:main:pointwise0",
{x, y},
{1},
[&](auto* rm, const auto& inputs, const auto& axes) {
auto rsum = rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}),
inputs[0]);
auto rsumb = rm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum);
return add_pointwise(
p2, rm, "main:pointwise0", {rsumb, inputs[1]}, single_pointwise("add"));
});
mm->add_return({add});
}
EXPECT(p1 == p2);
}
TEST_CASE(reduce_reduce)
{
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
migraphx::program p1;
{
auto* mm = p1.get_main_module();
auto x = mm->add_parameter("x", s);
auto rsum = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
auto rsumb = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum);
auto rsumdiff = add_pointwise(p1, "main:pointwise0", {rsumb, x}, single_pointwise("sub"));
auto rsum2 =
mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), rsumdiff);
auto sqrt = add_pointwise(p1, "main:pointwise1", {rsum2}, single_pointwise("sqrt"));
mm->add_return({sqrt});
}
run_pass(p1);
migraphx::program p2;
{
auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s);
auto sqrt = add_reduce(
p2,
"main:reduce_sum1:main:reduce_sum0:main:pointwise0:main:pointwise1",
{x},
{1},
[&](auto* rm, const auto& inputs, const auto& axes) {
auto rsum = rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}),
inputs[0]);
auto rsumb = rm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum);
auto rsumdiff = add_pointwise(
p2, rm, "main:pointwise0", {rsumb, inputs[0]}, single_pointwise("sub"));
auto rsum2 = rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}),
rsumdiff);
return add_pointwise(p2, rm, "main:pointwise1", {rsum2}, single_pointwise("sqrt"));
});
mm->add_return({sqrt});
}
EXPECT(p1 == p2);
}
TEST_CASE(reduce_reduce_mismatch_axis)
{
migraphx::shape s{migraphx::shape::float_type, {4, 2, 3}};
migraphx::program p1;
{
auto* mm = p1.get_main_module();
auto x = mm->add_parameter("x", s);
auto rsum1 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
auto rsum2 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {2}}}), rsum1);
mm->add_return({rsum2});
}
run_pass(p1);
migraphx::program p2;
{
auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s);
auto rsum1 = add_reduce(p2, "main:reduce_sum0", {x}, {1}, single_reduce("reduce_sum"));
auto rsum2 = add_reduce(p2, "main:reduce_sum1", {rsum1}, {2}, single_reduce("reduce_sum"));
mm->add_return({rsum2});
}
EXPECT(p1 == p2);
}
TEST_CASE(pointwise_reduce_broadcast)
{
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
migraphx::program p1;
{
auto* mm = p1.get_main_module();
auto x = mm->add_parameter("x", s);
auto rsum1 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
auto sqrt = add_pointwise(p1, "main:pointwise0", {rsum1}, single_pointwise("sqrt"));
auto sqrtb = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), sqrt);
auto add1 = add_pointwise(p1, "main:pointwise1", {sqrtb, x}, single_pointwise("add"));
auto rsum2 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), add1);
auto add2 = add_pointwise(p1, "main:pointwise2", {rsum2, rsum1}, single_pointwise("add"));
mm->add_return({add2});
}
run_pass(p1);
migraphx::program p2;
{
auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s);
auto add2 = add_reduce(
p2,
"main:pointwise0:main:pointwise1:main:reduce_sum1:main:pointwise2:main:reduce_sum0",
{x},
{1},
[&](auto* rm, const auto& inputs, const auto& axes) {
auto rsum1 = rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}),
inputs[0]);
auto sqrt =
add_pointwise(p2, rm, "main:pointwise0", {rsum1}, single_pointwise("sqrt"));
auto sqrtb = rm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), sqrt);
auto add1 = add_pointwise(
p2, rm, "main:pointwise1", {sqrtb, inputs[0]}, single_pointwise("add"));
auto rsum2 =
rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}), add1);
return add_pointwise(
p2, rm, "main:pointwise2", {rsum2, rsum1}, single_pointwise("add"));
});
mm->add_return({add2});
}
EXPECT(p1 == p2);
}
TEST_CASE(reduce_reduce_broadcast)
{
migraphx::shape s{migraphx::shape::float_type, {4, 2, 3}};
migraphx::program p1;
{
auto* mm = p1.get_main_module();
auto x = mm->add_parameter("x", s);
auto rsum1 = add_reduce(p1, "test:reduce_sum0", {x}, {1}, single_reduce("reduce_sum"));
auto rsumb = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum1);
auto add = add_reduce(
p1,
"test:reduce_sum1",
{rsumb, x},
{1},
[&](auto* rm, const auto& inputs, const auto& axes) {
auto add2 =
add_pointwise(p1, rm, "test:pointwise0", inputs, single_pointwise("add"));
return rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}), add2);
});
mm->add_return({add});
}
run_pass(p1);
migraphx::program p2;
{
auto* mm = p2.get_main_module();
auto x = mm->add_parameter("x", s);
auto rsum = add_reduce(
p2,
"test:reduce_sum1:test:reduce_sum0",
{x},
{1},
[&](auto* rm, const auto& inputs, const auto& axes) {
auto rsum1 = rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}),
inputs[0]);
auto rsumb = rm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum1);
auto add = add_pointwise(
p2, rm, "test:pointwise0", {rsumb, inputs[0]}, single_pointwise("add"));
return rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}), add);
});
mm->add_return({rsum});
}
EXPECT(p1 == p2);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -27,7 +27,7 @@
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/target.hpp>
TEST_CASE(tuple_to_from_gpu)
TEST_CASE(tuple_from_gpu)
{
migraphx::shape s1{migraphx::shape::float_type, {2, 3}};
migraphx::shape s2{migraphx::shape::int32_type, {2, 4}};
......@@ -47,4 +47,23 @@ TEST_CASE(tuple_to_from_gpu)
EXPECT(result2 == p2_data);
}
TEST_CASE(tuple_to_gpu)
{
migraphx::shape s1{migraphx::shape::float_type, {2, 3}};
migraphx::shape s2{migraphx::shape::int32_type, {2, 4}};
std::vector<float> p1_data = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6};
std::vector<int> p2_data = {1, 2, 3, 4, 5, 6, 7, 8};
auto p1 = migraphx::argument{s1, p1_data.data()};
auto p2 = migraphx::argument{s2, p2_data.data()};
auto p_gpu = migraphx::gpu::to_gpu(migraphx::argument({p1, p2}));
auto p_host = migraphx::gpu::from_gpu(p_gpu);
std::vector<migraphx::argument> results = p_host.get_sub_objects();
std::vector<float> result1;
results[0].visit([&](auto output) { result1.assign(output.begin(), output.end()); });
std::vector<int> result2;
results[1].visit([&](auto output) { result2.assign(output.begin(), output.end()); });
EXPECT(result1 == p1_data);
EXPECT(result2 == p2_data);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -27,8 +27,8 @@
#include <migraphx/generate.hpp>
#include <migraphx/program.hpp>
#include <migraphx/par_for.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/device_name.hpp>
......@@ -235,7 +235,7 @@ TEST_CASE(code_object_hip)
auto y = mm->add_parameter("output", input);
mm->add_instruction(co, x, y);
migraphx::compile_options options;
p.compile(migraphx::gpu::target{}, options);
p.compile(migraphx::make_target("gpu"), options);
auto result =
migraphx::gpu::from_gpu(p.eval({{"output", migraphx::gpu::allocate_gpu(input)}}).front());
......@@ -261,7 +261,7 @@ TEST_CASE(compile_code_object_hip)
auto x = mm->add_literal(input_literal);
auto y = mm->add_parameter("output", input);
mm->add_instruction(co, x, y);
p.compile(migraphx::gpu::target{}, migraphx::compile_options{});
p.compile(migraphx::make_target("gpu"), migraphx::compile_options{});
auto result =
migraphx::gpu::from_gpu(p.eval({{"output", migraphx::gpu::allocate_gpu(input)}}).front());
......@@ -284,7 +284,7 @@ TEST_CASE(compile_pointwise)
auto x = mm->add_literal(input_literal);
auto y = mm->add_parameter("output", input);
mm->add_instruction(co, x, y);
p.compile(migraphx::gpu::target{}, migraphx::compile_options{});
p.compile(migraphx::make_target("gpu"), migraphx::compile_options{});
auto result =
migraphx::gpu::from_gpu(p.eval({{"output", migraphx::gpu::allocate_gpu(input)}}).front());
......
......@@ -26,6 +26,7 @@
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
......@@ -35,7 +36,7 @@ void gpu_literal_test()
auto* mm = p.get_main_module();
auto lit = generate_literal(migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
mm->add_literal(lit);
p.compile(migraphx::gpu::target{});
p.compile(migraphx::make_target("gpu"));
auto scratch = p.get_parameter("scratch");
if(scratch == mm->end())
{
......
......@@ -25,7 +25,7 @@
#include <iostream>
#include <vector>
#include <hip/hip_runtime_api.h>
#include <migraphx/gpu/target.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/verify.hpp>
#include <test.hpp>
#include <basic_ops.hpp>
......@@ -57,7 +57,7 @@ TEST_CASE(host_same_buffer_copy)
pp["a"] = migraphx::argument(ss, a_vec.data());
pp["b"] = migraphx::argument(ss, b_vec.data());
std::vector<float> gpu_result;
migraphx::target gpu_t = migraphx::gpu::target{};
migraphx::target gpu_t = migraphx::make_target("gpu");
migraphx::compile_options options;
options.offload_copy = true;
p.compile(gpu_t, options);
......
......@@ -25,7 +25,7 @@
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/write_literals.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/module.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
......@@ -121,7 +121,7 @@ migraphx::argument run_gpu(migraphx::program p, const migraphx::parameter_map& i
migraphx::argument run_ref(migraphx::program p, const migraphx::parameter_map& inputs)
{
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
return p.eval(inputs).front();
}
......@@ -140,7 +140,7 @@ TEST_CASE(conv)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
func.func @mlir_convolution(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
return %0 : tensor<1x2x2x2xf32>
}
......@@ -163,7 +163,7 @@ TEST_CASE(conv_add_relu)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
func.func @mlir_convolution(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
......@@ -187,4 +187,30 @@ module {
EXPECT(verify_mlir(m));
}
TEST_CASE(dot_add)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
%0 = migraphx.dot(%arg0, %arg1) : tensor<1x5x4xf32>, tensor<1x4x3xf32> -> tensor<1x5x3xf32>
%1 = migraphx.add(%0, %arg2) : (tensor<1x5x3xf32>, tensor<1x5x3xf32>) -> tensor<1x5x3xf32>
return %1 : tensor<1x5x3xf32>
}
}
)__migraphx__";
migraphx::module m;
auto arg0 = m.add_parameter("arg0", {migraphx::shape::float_type, {1, 5, 4}});
auto arg1 = m.add_parameter("arg1", {migraphx::shape::float_type, {1, 4, 3}});
auto arg2 = m.add_parameter("arg2", {migraphx::shape::float_type, {1, 5, 3}});
auto dot = m.add_instruction(migraphx::make_op("dot"), arg0, arg1);
auto add = m.add_instruction(migraphx::make_op("add"), dot, arg2);
m.add_return({add});
auto s = migraphx::gpu::dump_mlir(m);
// Skip test if MLIR is not enabled
if(s.empty())
return;
CHECK(encode(s) == encode(mlir_output));
EXPECT(verify_mlir(m));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -27,8 +27,7 @@
#include <migraphx/instruction.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/propagate_constant.hpp>
......@@ -39,8 +38,8 @@
TEST_CASE(gpu_target_copy)
{
migraphx::target gpu_t = migraphx::gpu::target{};
migraphx::target ref_t = migraphx::ref::target{};
migraphx::target gpu_t = migraphx::make_target("gpu");
migraphx::target ref_t = migraphx::make_target("ref");
migraphx::shape s{migraphx::shape::int8_type, {2, 3, 4, 5}};
auto ref_arg_orig = migraphx::generate_argument(s, 0x123456L);
......@@ -104,11 +103,11 @@ TEST_CASE(int8_quantization)
m["a"] = migraphx::generate_argument(sa);
m["b"] = migraphx::generate_argument(sb);
std::vector<float> ref_result;
migraphx::target ref_t = migraphx::ref::target{};
migraphx::target ref_t = migraphx::make_target("ref");
run_prog(p, ref_t, m, ref_result);
std::vector<float> gpu_result;
migraphx::target gpu_t = migraphx::gpu::target{};
migraphx::target gpu_t = migraphx::make_target("gpu");
run_prog(p, gpu_t, m, gpu_result);
EXPECT(migraphx::verify_range(ref_result, gpu_result));
......
......@@ -24,6 +24,7 @@
#include <iostream>
#include <vector>
#include <migraphx/register_target.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/context.hpp>
#include <migraphx/gpu/compile_hip.hpp>
......@@ -133,7 +134,7 @@ TEST_CASE(test_stream_sync)
auto mult_out = mm->add_instruction(migraphx::make_op("dot"), x, y);
mm->add_instruction(migraphx::make_op("add"), mult_out, test_val);
p.compile(migraphx::gpu::target{});
p.compile(migraphx::make_target("gpu"));
// Run network and then verify with kernel
auto args = p.eval({{"x", ginput}, {"output", goutput}}, {pstream.get(), true});
......
......@@ -30,12 +30,12 @@
template <class F>
migraphx::instruction_ref add_pointwise(migraphx::program& p,
migraphx::module_ref mm,
const std::string& name,
std::vector<migraphx::instruction_ref> inputs,
F f)
{
auto* pm = p.create_module(name);
auto* mm = p.get_main_module();
pm->set_bypass();
std::vector<migraphx::instruction_ref> params;
std::transform(inputs.begin(), inputs.end(), std::back_inserter(params), [&](auto input) {
......@@ -47,6 +47,15 @@ migraphx::instruction_ref add_pointwise(migraphx::program& p,
return mm->add_instruction(migraphx::make_op("pointwise"), inputs, {pm});
}
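// Convenience overload that places the pointwise instruction in the main module.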
template <class F>
migraphx::instruction_ref add_pointwise(migraphx::program& p,
const std::string& name,
std::vector<migraphx::instruction_ref> inputs,
F f)
{
return add_pointwise(p, p.get_main_module(), name, inputs, f);
}
inline auto single_pointwise(const std::string& name)
{
return [=](auto* pm, const auto& inputs) {
......
......@@ -22,12 +22,11 @@
* THE SOFTWARE.
*/
#include <migraphx/program.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/marker.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/register_target.hpp>
#include "test.hpp"
struct mock_marker
......@@ -64,7 +63,7 @@ TEST_CASE(marker)
auto one = mm->add_literal(1);
auto two = mm->add_literal(2);
mm->add_instruction(migraphx::make_op("add"), one, two);
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
mock_marker temp_marker;
p.mark({}, temp_marker);
......
......@@ -25,7 +25,7 @@
#include <migraphx/iterator_for.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/ranges.hpp>
#include <sstream>
#include "test.hpp"
......
c9a53c925510a101f5ca94d5ecda0924e40a8463
ad4db1269972f92fdba932bb5770943291be3ca5
......@@ -186,14 +186,13 @@ TEST_CASE(argmax_dyn_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"x",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {5, 5, 0}, {6, 6, 0}}});
"x", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}, {5, 5}, {6, 6}}});
auto ins = mm->add_instruction(migraphx::make_op("argmax", {{"axis", 2}}), l0);
auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {2}}}), ins);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = parse_onnx("argmax_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -296,8 +295,7 @@ TEST_CASE(averagepool_dyn_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}});
"0", {migraphx::shape::float_type, {{1, 4}, {3, 3}, {5, 5}, {5, 5}, {5, 5}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0, 0, 0}},
......@@ -307,7 +305,7 @@ TEST_CASE(averagepool_dyn_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = migraphx::parse_onnx("averagepool_dyn_test.onnx", options);
EXPECT(p == prog);
}
......@@ -315,7 +313,7 @@ TEST_CASE(averagepool_dyn_test)
TEST_CASE(averagepool_dyn_autopad_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("averagepool_dyn_autopad_error_test.onnx", options); }));
}
......@@ -323,7 +321,7 @@ TEST_CASE(averagepool_dyn_autopad_error_test)
TEST_CASE(averagepool_dyn_asym_padding_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("averagepool_dyn_asym_padding_error_test.onnx", options); }));
}
......@@ -331,7 +329,7 @@ TEST_CASE(averagepool_dyn_asym_padding_error_test)
TEST_CASE(averagepool_dyn_cip_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("averagepool_dyn_cip_error_test.onnx", options); }));
}
......@@ -589,15 +587,14 @@ TEST_CASE(binary_dyn_brcst_prelu_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {3, 3}, {4, 4}, {5, 5}}});
auto l1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {4, 5}});
auto ret = add_common_op(*mm, migraphx::make_op("prelu"), {l0, l1});
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = migraphx::parse_onnx("binary_dyn_brcst_prelu_test.onnx", options);
EXPECT(p == prog);
......@@ -609,14 +606,13 @@ TEST_CASE(binary_dyn_brcst_add_test)
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, {4, 5}});
auto l1 = mm->add_parameter(
"1",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
"1", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {3, 3}, {4, 4}, {5, 5}}});
auto ret = add_common_op(*mm, migraphx::make_op("add"), {l0, l1});
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = migraphx::parse_onnx("binary_dyn_brcst_add_test.onnx", options);
EXPECT(p == prog);
......@@ -625,7 +621,7 @@ TEST_CASE(binary_dyn_brcst_add_test)
TEST_CASE(binary_dyn_brcst_attr_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("binary_dyn_brcst_attr_error_test.onnx", options); }));
}
......@@ -635,8 +631,7 @@ TEST_CASE(binary_dyn_brcst_mul_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {3, 3}, {4, 4}, {5, 5}}});
auto l1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {4, 1}});
auto bl1 = mm->add_instruction(
......@@ -648,7 +643,7 @@ TEST_CASE(binary_dyn_brcst_mul_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = migraphx::parse_onnx("binary_dyn_brcst_mul_test.onnx", options);
EXPECT(p == prog);
......@@ -845,15 +840,15 @@ TEST_CASE(concat_dyn_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {1, 4, 0}, {3, 3, 0}}});
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1, 4}, {3, 3}}});
auto l1 = mm->add_parameter(
"1", migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {1, 4, 0}, {3, 3, 0}}});
"1", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1, 4}, {3, 3}}});
auto ret = mm->add_instruction(migraphx::make_op("concat"), l0, l1);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = parse_onnx("concat_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -1120,8 +1115,8 @@ TEST_CASE(conv_dynamic_batch_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 6, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
auto l0 =
mm->add_parameter("0", {migraphx::shape::float_type, {{1, 6}, {3, 3}, {5, 5}, {5, 5}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
......@@ -1131,7 +1126,7 @@ TEST_CASE(conv_dynamic_batch_test)
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 6, 0};
options.default_dyn_dim_value = {1, 6};
auto prog = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", options);
EXPECT(p == prog);
......@@ -1141,8 +1136,8 @@ TEST_CASE(conv_dynamic_bias_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 6, 0}, {3, 3, 0}, {32, 32, 0}, {32, 32, 0}}});
auto x0 =
mm->add_parameter("0", {migraphx::shape::float_type, {{1, 6}, {3, 3}, {32, 32}, {32, 32}}});
auto x1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto x2 = mm->add_parameter("2", {migraphx::shape::float_type, {1}});
auto x3 = mm->add_instruction(migraphx::make_op("convolution"), x0, x1);
......@@ -1151,7 +1146,7 @@ TEST_CASE(conv_dynamic_bias_test)
mm->add_return({x5});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 6, 0};
options.default_dyn_dim_value = {1, 6};
auto prog = migraphx::parse_onnx("conv_dynamic_bias_test.onnx", options);
EXPECT(p == prog);
}
......@@ -1160,8 +1155,8 @@ TEST_CASE(conv_dynamic_img_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l0 =
mm->add_parameter("0", {migraphx::shape::float_type, {{1, 1}, {3, 3}, {5, 10}, {5, 10}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
......@@ -1171,7 +1166,7 @@ TEST_CASE(conv_dynamic_img_test)
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
options.default_dyn_dim_value = {5, 10};
auto prog = migraphx::parse_onnx("conv_dynamic_img_test.onnx", options);
EXPECT(p == prog);
......@@ -1182,8 +1177,8 @@ TEST_CASE(conv_dynamic_weights_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
auto l1 =
mm->add_parameter("1", {migraphx::shape::float_type, {{1, 1}, {3, 3}, {2, 4}, {2, 4}}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
......@@ -1192,7 +1187,7 @@ TEST_CASE(conv_dynamic_weights_test)
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 4, 0};
options.default_dyn_dim_value = {2, 4};
auto prog = migraphx::parse_onnx("conv_dynamic_weights_test.onnx", options);
EXPECT(p == prog);
......@@ -1202,10 +1197,10 @@ TEST_CASE(conv_dynamic_img_and_weights_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
auto l0 =
mm->add_parameter("0", {migraphx::shape::float_type, {{1, 1}, {3, 3}, {5, 10}, {5, 10}}});
auto l1 =
mm->add_parameter("1", {migraphx::shape::float_type, {{1, 1}, {3, 3}, {2, 4}, {2, 4}}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
......@@ -1214,8 +1209,8 @@ TEST_CASE(conv_dynamic_img_and_weights_test)
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
options.map_dyn_input_dims["1"] = {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}};
options.default_dyn_dim_value = {5, 10};
options.map_dyn_input_dims["1"] = {{1, 1}, {3, 3}, {2, 4}, {2, 4}};
auto prog = migraphx::parse_onnx("conv_dynamic_img_and_weights_test.onnx", options);
EXPECT(p == prog);
......@@ -1225,8 +1220,8 @@ TEST_CASE(conv_dynamic_batch_same_upper)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 10, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
auto l0 =
mm->add_parameter("0", {migraphx::shape::float_type, {{1, 10}, {3, 3}, {5, 5}, {5, 5}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
......@@ -1236,7 +1231,7 @@ TEST_CASE(conv_dynamic_batch_same_upper)
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
options.default_dyn_dim_value = {1, 10};
auto prog = migraphx::parse_onnx("conv_dynamic_batch_same_upper_test.onnx", options);
EXPECT(p == prog);
......@@ -1246,8 +1241,8 @@ TEST_CASE(conv_dynamic_img_same_upper)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l0 =
mm->add_parameter("0", {migraphx::shape::float_type, {{1, 1}, {3, 3}, {5, 10}, {5, 10}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
......@@ -1260,7 +1255,7 @@ TEST_CASE(conv_dynamic_img_same_upper)
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
options.default_dyn_dim_value = {5, 10};
auto prog = migraphx::parse_onnx("conv_dynamic_img_same_upper_test.onnx", options);
EXPECT(p == prog);
......@@ -1271,8 +1266,8 @@ TEST_CASE(conv_dynamic_kernel_same_lower)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
auto l1 =
mm->add_parameter("1", {migraphx::shape::float_type, {{1, 1}, {3, 3}, {2, 4}, {2, 4}}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}},
......@@ -1284,7 +1279,7 @@ TEST_CASE(conv_dynamic_kernel_same_lower)
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 4, 0};
options.default_dyn_dim_value = {2, 4};
auto prog = migraphx::parse_onnx("conv_dynamic_kernel_same_lower_test.onnx", options);
EXPECT(p == prog);
}
......@@ -2030,14 +2025,13 @@ TEST_CASE(flatten_dyn_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {3, 3}, {4, 4}, {5, 5}}});
auto c0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
auto ret = mm->add_instruction(migraphx::make_op("flatten", {{"axis", 2}}), c0);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = parse_onnx("flatten_dyn_test.onnx", options);
EXPECT(p == prog);
}
......@@ -2087,11 +2081,9 @@ TEST_CASE(gather_dyn_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"data",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {5, 5, 0}, {6, 6, 0}}});
"data", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}, {5, 5}, {6, 6}}});
auto l1 = mm->add_parameter(
"indices",
migraphx::shape{migraphx::shape::int32_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
"indices", migraphx::shape{migraphx::shape::int32_type, {{1, 4}, {3, 3}, {4, 4}, {5, 5}}});
auto cont_l0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
auto cont_l1 = mm->add_instruction(migraphx::make_op("contiguous"), l1);
......@@ -2101,7 +2093,7 @@ TEST_CASE(gather_dyn_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = parse_onnx("gather_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -2181,15 +2173,15 @@ TEST_CASE(gathernd_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("data",
migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4}}});
auto l0 = mm->add_parameter(
"data", migraphx::shape{migraphx::shape::float_type, {{2, 4, {2}}, {2, 4}}});
auto l1 = mm->add_parameter("indices",
migraphx::shape{migraphx::shape::int64_type, {{1, 3}, {2, 2}}});
auto r = mm->add_instruction(migraphx::make_op("gathernd"), l0, l1);
mm->add_return({r});
migraphx::onnx_options options;
options.map_dyn_input_dims["data"] = {{2, 4, 2}, {2, 4}};
options.map_dyn_input_dims["data"] = {{2, 4, {2}}, {2, 4}};
options.map_dyn_input_dims["indices"] = {{1, 3}, {2, 2}};
auto prog = migraphx::parse_onnx("gathernd_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -2318,9 +2310,9 @@ TEST_CASE(gemm_dyn_inner_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"A", migraphx::shape{migraphx::shape::float_type, {{1, 10, 8}, {6, 6, 0}}});
"A", migraphx::shape{migraphx::shape::float_type, {{1, 10, {8}}, {6, 6}}});
auto l1 = mm->add_parameter(
"B", migraphx::shape{migraphx::shape::float_type, {{1, 10, 8}, {7, 7, 0}}});
"B", migraphx::shape{migraphx::shape::float_type, {{1, 10, {8}}, {7, 7}}});
auto alpha = 0.5f;
auto a_l = mm->add_literal(alpha);
auto t_a = add_common_op(*mm, migraphx::make_op("mul"), {a_l, l0});
......@@ -2329,7 +2321,7 @@ TEST_CASE(gemm_dyn_inner_test)
mm->add_return({dot});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 8};
options.default_dyn_dim_value = {1, 10, {8}};
auto prog = migraphx::parse_onnx("gemm_dyn_inner_test.onnx", options);
EXPECT(p == prog);
}
......@@ -2339,7 +2331,7 @@ TEST_CASE(gemm_dyn_outer_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"A", migraphx::shape{migraphx::shape::float_type, {{5, 5, 0}, {5, 10, 7}}});
"A", migraphx::shape{migraphx::shape::float_type, {{5, 5}, {5, 10, {7}}}});
auto l1 = mm->add_parameter("B", migraphx::shape{migraphx::shape::float_type, {11, 5}});
auto alpha = 2.f;
auto a_l = mm->add_literal(alpha);
......@@ -2350,7 +2342,7 @@ TEST_CASE(gemm_dyn_outer_test)
mm->add_return({dot});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 7};
options.default_dyn_dim_value = {5, 10, {7}};
auto prog = migraphx::parse_onnx("gemm_dyn_outer_test.onnx", options);
EXPECT(p == prog);
}
......@@ -2401,10 +2393,8 @@ TEST_CASE(globalavgpool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 4, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
auto input = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {3, 3}, {16, 16}, {16, 16}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"lengths", {16, 16}},
......@@ -2413,7 +2403,7 @@ TEST_CASE(globalavgpool_dyn_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = parse_onnx("globalavgpool_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -2440,10 +2430,8 @@ TEST_CASE(globallppool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 1, 0}, {3, 3, 0}, {16, 32, 0}, {16, 32, 0}}});
auto input = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{1, 1}, {3, 3}, {16, 32}, {16, 32}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::lpnorm},
{"dyn_global", true},
......@@ -2453,7 +2441,7 @@ TEST_CASE(globallppool_dyn_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {16, 32, 0};
options.default_dyn_dim_value = {16, 32};
auto prog = migraphx::parse_onnx("globallppool_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -2480,10 +2468,8 @@ TEST_CASE(globalmaxpool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 4, 0}, {3, 3, 0}, {32, 32, 0}, {32, 32, 0}}});
auto input = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {3, 3}, {32, 32}, {32, 32}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"lengths", {32, 32}},
......@@ -2492,7 +2478,7 @@ TEST_CASE(globalmaxpool_dyn_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = parse_onnx("globalmaxpool_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -3691,16 +3677,16 @@ TEST_CASE(matmul_dyn_mm_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"1", migraphx::shape{migraphx::shape::float_type, {{4, 8, 6}, {7, 7, 0}}});
auto l1 = mm->add_parameter(
"2", migraphx::shape{migraphx::shape::float_type, {{7, 7, 0}, {1, 5, 3}}});
auto l0 =
mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {{4, 8, {6}}, {7, 7}}});
auto l1 =
mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {{7, 7}, {1, 5, {3}}}});
auto ret = migraphx::add_apply_alpha_beta(*mm, {l0, l1}, migraphx::make_op("dot"), 1.0f, 0.0f);
mm->add_return({ret});
migraphx::onnx_options options;
options.map_dyn_input_dims["1"] = {{4, 8, 6}, {7, 7, 0}};
options.map_dyn_input_dims["2"] = {{7, 7, 0}, {1, 5, 3}};
options.map_dyn_input_dims["1"] = {{4, 8, {6}}, {7, 7}};
options.map_dyn_input_dims["2"] = {{7, 7}, {1, 5, {3}}};
auto prog = parse_onnx("matmul_dyn_mm_test.onnx", options);
EXPECT(p == prog);
......@@ -3710,8 +3696,8 @@ TEST_CASE(matmul_dyn_mv_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"1", migraphx::shape{migraphx::shape::float_type, {{4, 8, 6}, {7, 7, 0}}});
auto l0 =
mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {{4, 8, {6}}, {7, 7}}});
auto l1 = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {7}});
auto sl1 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), l1);
auto res = migraphx::add_apply_alpha_beta(*mm, {l0, sl1}, migraphx::make_op("dot"), 1.0f, 0.0f);
......@@ -3719,7 +3705,7 @@ TEST_CASE(matmul_dyn_mv_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.map_dyn_input_dims["1"] = {{4, 8, 6}, {7, 7, 0}};
options.map_dyn_input_dims["1"] = {{4, 8, {6}}, {7, 7}};
auto prog = parse_onnx("matmul_dyn_mv_test.onnx", options);
EXPECT(p == prog);
......@@ -3731,14 +3717,14 @@ TEST_CASE(matmul_dyn_vm_test)
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {7}});
auto l1 = mm->add_parameter(
"2", migraphx::shape{migraphx::shape::float_type, {{7, 7, 0}, {4, 10, 8}}});
"2", migraphx::shape{migraphx::shape::float_type, {{7, 7}, {4, 10, {8}}}});
auto sl0 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0}}}), l0);
auto res = migraphx::add_apply_alpha_beta(*mm, {sl0, l1}, migraphx::make_op("dot"), 1.0f, 0.0f);
auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), res);
mm->add_return({ret});
migraphx::onnx_options options;
options.map_dyn_input_dims["2"] = {{7, 7, 0}, {4, 10, 8}};
options.map_dyn_input_dims["2"] = {{7, 7}, {4, 10, {8}}};
auto prog = parse_onnx("matmul_dyn_vm_test.onnx", options);
EXPECT(p == prog);
......@@ -3748,7 +3734,7 @@ TEST_CASE(matmul_dyn_vv_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape::dynamic_dimension dd{5, 8, 7};
migraphx::shape::dynamic_dimension dd{5, 8, {7}};
auto l0 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {dd}});
auto l1 = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {dd}});
auto sl0 = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0}}}), l0);
......@@ -3769,7 +3755,7 @@ TEST_CASE(matmul_dyn_vv_test)
TEST_CASE(matmul_dyn_broadcast_error)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
EXPECT(test::throws([&] { migraphx::parse_onnx("matmul_dyn_broadcast_error.onnx", options); }));
}
......@@ -3789,7 +3775,7 @@ TEST_CASE(matmulinteger_test)
TEST_CASE(matmulinteger_dyn_error)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
EXPECT(test::throws([&] { migraphx::parse_onnx("matmulinteger_dyn_error.onnx", options); }));
}
......@@ -4098,13 +4084,13 @@ TEST_CASE(neg_dynamic_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::int64_type, {{1, 10, 0}, {3, 3, 0}}};
migraphx::shape s{migraphx::shape::int64_type, {{1, 10}, {3, 3}}};
auto input = mm->add_parameter("0", s);
auto ret = mm->add_instruction(migraphx::make_op("neg"), input);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
options.default_dyn_dim_value = {1, 10};
auto prog = migraphx::parse_onnx("neg_dynamic_test.onnx", options);
EXPECT(p == prog);
}
......@@ -4140,9 +4126,9 @@ TEST_CASE(nms_dynamic_batch_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {{1, 10, 0}, {6, 6, 0}, {4, 4, 0}}};
migraphx::shape sb{migraphx::shape::float_type, {{1, 10}, {6, 6}, {4, 4}}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {{1, 10, 0}, {1, 1, 0}, {6, 6, 0}}};
migraphx::shape ss{migraphx::shape::float_type, {{1, 10}, {1, 1}, {6, 6}}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
......@@ -4161,7 +4147,7 @@ TEST_CASE(nms_dynamic_batch_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
options.default_dyn_dim_value = {1, 10};
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_dynamic_batch_test.onnx", options);
......@@ -4172,9 +4158,9 @@ TEST_CASE(nms_dynamic_boxes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {{1, 1, 0}, {6, 20, 0}, {4, 4, 0}}};
migraphx::shape sb{migraphx::shape::float_type, {{1, 1}, {6, 20}, {4, 4}}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {{1, 1, 0}, {1, 1, 0}, {6, 20, 0}}};
migraphx::shape ss{migraphx::shape::float_type, {{1, 1}, {1, 1}, {6, 20}}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
......@@ -4187,7 +4173,7 @@ TEST_CASE(nms_dynamic_boxes_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {6, 20, 0};
options.default_dyn_dim_value = {6, 20};
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_dynamic_boxes_test.onnx", options);
......@@ -4200,7 +4186,7 @@ TEST_CASE(nms_dynamic_classes_test)
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::float_type, {1, 6, 4}};
auto b = mm->add_parameter("boxes", sb);
migraphx::shape ss{migraphx::shape::float_type, {{1, 1, 0}, {1, 10, 0}, {6, 6, 0}}};
migraphx::shape ss{migraphx::shape::float_type, {{1, 1}, {1, 10}, {6, 6}}};
auto s = mm->add_parameter("scores", ss);
migraphx::shape smo{migraphx::shape::int64_type, {1}};
auto mo = mm->add_parameter("max_output_boxes_per_class", smo);
......@@ -4213,7 +4199,7 @@ TEST_CASE(nms_dynamic_classes_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
options.default_dyn_dim_value = {1, 10};
options.use_dyn_output = true;
auto prog = migraphx::parse_onnx("nms_dynamic_classes_test.onnx", options);
......@@ -4388,12 +4374,12 @@ TEST_CASE(pad_attr_dyn_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}});
"0", migraphx::shape{migraphx::shape::float_type, {{2, 4, {2}}, {2, 4, {2}}}});
auto ret = mm->add_instruction(migraphx::make_op("pad", {{"pads", {1, 1, 1, 1}}}), x);
mm->add_return({ret});
migraphx::onnx_options options;
options.map_dyn_input_dims["0"] = {{2, 4, 2}, {2, 4, 2}};
options.map_dyn_input_dims["0"] = {{2, 4, {2}}, {2, 4, {2}}};
auto prog = parse_onnx("pad_attr_dyn_test.onnx", options);
EXPECT(p == prog);
}
......@@ -4403,13 +4389,13 @@ TEST_CASE(pad_cnst_dyn_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{2, 4, 2}, {2, 4, 2}}});
"0", migraphx::shape{migraphx::shape::float_type, {{2, 4, {2}}, {2, 4, {2}}}});
mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {4}}, {0, 2, 0, 1}});
auto ret = mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 2, 0, 1}}}), x);
mm->add_return({ret});
migraphx::onnx_options options;
options.map_dyn_input_dims["0"] = {{2, 4, 2}, {2, 4, 2}};
options.map_dyn_input_dims["0"] = {{2, 4, {2}}, {2, 4, {2}}};
auto prog = parse_onnx("pad_cnst_dyn_test.onnx", options);
EXPECT(p == prog);
}
......@@ -4417,7 +4403,7 @@ TEST_CASE(pad_cnst_dyn_test)
TEST_CASE(pad_dyn_reflect_error)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 4, 2};
options.default_dyn_dim_value = {2, 4, {2}};
EXPECT(test::throws([&] { migraphx::parse_onnx("pad_dyn_reflect_error.onnx", options); }));
}
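
The hunks above and below all follow the same migration: a `dynamic_dimension` is now written as `{min, max}`, with any optimal values supplied as a brace-enclosed set (e.g. `{2, 4, {2}}`) instead of a third scalar. A minimal sketch of the option plumbing, assuming a hypothetical model file name and illustrative dimensions:

#include <migraphx/onnx.hpp>

// Hedged sketch: not from the test suite; "my_model.onnx" and the dimensions are illustrative.
migraphx::program parse_with_dyn_dims()
{
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4};                  // {min, max}, no optimal values
    options.map_dyn_input_dims["0"] = {{2, 4, {2}}, {3, 3}}; // per-input dims, optimal value 2 on axis 0
    return migraphx::parse_onnx("my_model.onnx", options);
}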
......@@ -4881,7 +4867,7 @@ TEST_CASE(reducel1_dyn_test)
// a shape with 4 dynamic dimensions
auto l0 = mm->add_parameter("x",
migraphx::shape{migraphx::shape::float_type,
{{3, 3, 0}, {3, 5, 0}, {4, 6, 5}, {5, 7, 6}}});
{{3, 3}, {3, 5}, {4, 6, {5}}, {5, 7, {6}}}});
auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
auto sum_ins =
mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {-2}}}), abs_ins);
......@@ -4889,7 +4875,7 @@ TEST_CASE(reducel1_dyn_test)
mm->add_return({sq_ins});
migraphx::onnx_options options;
options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, 5}, {5, 7, 6}};
options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, {5}}, {5, 7, {6}}};
auto prog = migraphx::parse_onnx("reducel1_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -4901,7 +4887,7 @@ TEST_CASE(reducel1_dyn_test)
// No axes given in the onnx file. Parser should default to all axes.
auto l0 = mm->add_parameter("x",
migraphx::shape{migraphx::shape::float_type,
{{3, 3, 0}, {3, 5, 0}, {4, 6, 5}, {5, 7, 6}}});
{{3, 3}, {3, 5}, {4, 6, {5}}, {5, 7, {6}}}});
auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
auto sum_ins =
mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {0, 1, 2, 3}}}), abs_ins);
......@@ -4910,7 +4896,7 @@ TEST_CASE(reducel1_dyn_test)
mm->add_return({sq_ins});
migraphx::onnx_options options;
options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, 5}, {5, 7, 6}};
options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, {5}}, {5, 7, {6}}};
auto prog = migraphx::parse_onnx("reducel1_dyn_noaxes_test.onnx", options);
EXPECT(p == prog);
......@@ -5116,8 +5102,10 @@ TEST_CASE(reshape_test)
migraphx::literal{migraphx::shape{migraphx::shape::int64_type, {2}}, reshape_dims});
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {4, 2, 3}});
op.dims = reshape_dims;
mm->add_instruction(op, l0);
mm->add_instruction(op, l0);
auto c0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
mm->add_instruction(op, c0);
auto c1 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
mm->add_instruction(op, c1);
auto prog = optimize_onnx("reshape_test.onnx");
EXPECT(p == prog);
......@@ -5806,17 +5794,17 @@ TEST_CASE(scatternd_dyn_test)
auto* mm = p.get_main_module();
// parameters with dynamic dimensions
auto l0 = mm->add_parameter(
"data", migraphx::shape{migraphx::shape::float_type, {{1, 3, 2}, {2, 2}, {2, 2}}});
"data", migraphx::shape{migraphx::shape::float_type, {{1, 3, {2}}, {2, 2}, {2, 2}}});
auto l1 = mm->add_parameter(
"indices", migraphx::shape{migraphx::shape::int64_type, {{2, 1, 2}, {1, 1}, {2, 2}}});
"indices", migraphx::shape{migraphx::shape::int64_type, {{2, 1, {2}}, {1, 1}, {2, 2}}});
auto l2 = mm->add_parameter(
"updates", migraphx::shape{migraphx::shape::float_type, {{2, 1, 2}, {1, 1}, {2, 2}}});
"updates", migraphx::shape{migraphx::shape::float_type, {{2, 1, {2}}, {1, 1}, {2, 2}}});
auto r = mm->add_instruction(migraphx::make_op("scatternd_none"), l0, l1, l2);
mm->add_return({r});
migraphx::onnx_options options;
options.map_dyn_input_dims["data"] = {{1, 3, 2}, {2, 2}, {2, 2}};
options.map_dyn_input_dims["indices"] = {{2, 1, 2}, {1, 1}, {2, 2}};
options.map_dyn_input_dims["updates"] = {{2, 1, 2}, {1, 1}, {2, 2}};
options.map_dyn_input_dims["data"] = {{1, 3, {2}}, {2, 2}, {2, 2}};
options.map_dyn_input_dims["indices"] = {{2, 1, {2}}, {1, 1}, {2, 2}};
options.map_dyn_input_dims["updates"] = {{2, 1, {2}}, {1, 1}, {2, 2}};
auto prog = migraphx::parse_onnx("scatternd_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -5950,7 +5938,7 @@ TEST_CASE(sinh_dynamic_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape::dynamic_dimension dd{1, 10, 0};
migraphx::shape::dynamic_dimension dd{1, 10};
std::vector<migraphx::shape::dynamic_dimension> dyn_dims;
dyn_dims.push_back(dd);
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, dyn_dims});
......@@ -6016,7 +6004,7 @@ TEST_CASE(slice_dyn_test)
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{3, 3, 0}, {1, 3, 0}, {2, 2, 0}}});
"0", migraphx::shape{migraphx::shape::float_type, {{3, 3}, {1, 3}, {2, 2}}});
auto ret = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {2}}}), l0);
mm->add_return({ret});
......@@ -6024,7 +6012,7 @@ TEST_CASE(slice_dyn_test)
migraphx::onnx_options options;
// Parser converts the dynamic input shape to static unless there is at least one non-fixed
// dynamic dimension. Slicing is not allowed along the non-fixed axis 1.
options.map_dyn_input_dims["0"] = {{3, 3, 0}, {1, 3, 0}, {2, 2, 0}};
options.map_dyn_input_dims["0"] = {{3, 3}, {1, 3}, {2, 2}};
auto prog = migraphx::parse_onnx("slice_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -6035,7 +6023,7 @@ TEST_CASE(slice_step_dyn_test)
// A slice command with non-default steps will have a "Step" instruction added in parsing.
// At the time of writing, Step doesn't support dynamic shape input.
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
EXPECT(test::throws([&] { migraphx::parse_onnx("slice_step_dyn_test.onnx", options); }));
}
......@@ -6044,7 +6032,7 @@ TEST_CASE(slice_reverse_dyn_test)
// A slice command with negative step on any axis will have a "Reverse" instruction added in
// parsing. At the time of writing, Reverse doesn't support dynamic shape input.
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
EXPECT(test::throws([&] { migraphx::parse_onnx("slice_reverse_dyn_test.onnx", options); }));
}
......@@ -6173,13 +6161,12 @@ TEST_CASE(softmax_dyn_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {4, 4, 0}}});
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {3, 3}, {4, 4}, {4, 4}}});
auto ret = mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), l0);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = migraphx::parse_onnx("softmax_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -6389,10 +6376,9 @@ TEST_CASE(squeeze_unsqueeze_dyn_test)
auto* mm = p.get_main_module();
std::vector<int64_t> squeeze_axes{0, 2, 3, 5};
std::vector<int64_t> unsqueeze_axes{0, 1, 3, 5};
auto l0 = mm->add_parameter(
"0",
auto l0 = mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 1, 0}, {1, 4, 0}, {1, 1, 0}, {1, 1, 0}, {1, 4, 0}, {1, 1, 0}}});
{{1, 1}, {1, 4}, {1, 1}, {1, 1}, {1, 4}, {1, 1}}});
auto c0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
auto l1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", squeeze_axes}}), c0);
auto c1 = mm->add_instruction(migraphx::make_op("contiguous"), l1);
......@@ -6400,7 +6386,7 @@ TEST_CASE(squeeze_unsqueeze_dyn_test)
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = parse_onnx("squeeze_unsqueeze_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -6694,14 +6680,13 @@ TEST_CASE(transpose_dyn_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}});
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}, {3, 3}}});
std::vector<int64_t> perm{0, 3, 1, 2};
auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), input);
mm->add_return({t0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = migraphx::parse_onnx("transpose_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -6891,7 +6876,7 @@ TEST_CASE(variable_batch_user_input_test1)
mm->add_return({r});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 2, 0};
options.default_dyn_dim_value = {2, 2};
auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);
......@@ -6902,14 +6887,13 @@ TEST_CASE(variable_batch_user_input_test2)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
auto l0 = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{2, 5}, {3, 3}, {16, 16}, {16, 16}}});
auto r = mm->add_instruction(migraphx::make_op("identity"), l0);
mm->add_return({r});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 5, 0};
options.default_dyn_dim_value = {2, 5};
auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);
......@@ -6920,14 +6904,13 @@ TEST_CASE(variable_batch_user_input_test3)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
auto l0 = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{2, 5}, {3, 3}, {16, 16}, {16, 16}}});
auto r = mm->add_instruction(migraphx::make_op("identity"), l0);
mm->add_return({r});
migraphx::onnx_options options;
options.map_dyn_input_dims["0"] = {{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}};
options.map_dyn_input_dims["0"] = {{2, 5}, {3, 3}, {16, 16}, {16, 16}};
auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);
......@@ -6955,7 +6938,7 @@ TEST_CASE(variable_batch_user_input_test5)
// Error using default_dim_value and default_dyn_dim_value
migraphx::onnx_options options;
options.default_dim_value = 2;
options.default_dyn_dim_value = {1, 2, 0};
options.default_dyn_dim_value = {1, 2};
EXPECT(test::throws([&] { migraphx::parse_onnx("variable_batch_test.onnx", options); }));
}
......@@ -6964,7 +6947,7 @@ TEST_CASE(variable_batch_user_input_test6)
{
// Error using both map_dyn_input_dims and map_input_dims
migraphx::onnx_options options;
options.map_dyn_input_dims["0"] = {{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}};
options.map_dyn_input_dims["0"] = {{2, 5}, {3, 3}, {16, 16}, {16, 16}};
options.map_input_dims["0"] = {2, 3, 16, 16};
EXPECT(test::throws([&] { migraphx::parse_onnx("variable_batch_test.onnx", options); }));
......@@ -7012,17 +6995,17 @@ TEST_CASE(where_dyn_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto lc = mm->add_parameter(
"c", migraphx::shape{migraphx::shape::bool_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}}});
"c", migraphx::shape{migraphx::shape::bool_type, {{1, 4}, {2, 2}, {2, 2}}});
auto lx = mm->add_parameter(
"x", migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}}});
"x", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}});
auto ly = mm->add_parameter(
"y", migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}}});
"y", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}});
auto r = mm->add_instruction(migraphx::make_op("where"), lc, lx, ly);
mm->add_return({r});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
auto prog = parse_onnx("where_dyn_test.onnx", options);
EXPECT(p == prog);
......@@ -7032,7 +7015,7 @@ TEST_CASE(where_mixed_test)
{
// mixture of static and dynamic input shapes is not supported
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
options.default_dyn_dim_value = {1, 4};
EXPECT(test::throws([&] { migraphx::parse_onnx("where_mixed_test.onnx", options); }));
}
......
......@@ -26,7 +26,7 @@
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/onnx.hpp>
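
The include swap above goes hand in hand with the compile calls in the hunks that follow: instead of constructing `migraphx::ref::target{}` directly, the tests now look the target up by name through the registry. A minimal sketch of that pattern, with a placeholder file name:

#include <migraphx/onnx.hpp>
#include <migraphx/register_target.hpp>

// Hedged sketch; "some_verify_test.onnx" is a placeholder, not a real test file.
void run_on_ref()
{
    auto p = migraphx::parse_onnx("some_verify_test.onnx");
    p.compile(migraphx::make_target("ref")); // registry lookup replaces migraphx::ref::target{}
    auto result = p.eval({}).back();         // evaluate with no external parameters
    (void)result;
}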
......@@ -36,7 +36,7 @@
TEST_CASE(averagepool_notset_test)
{
auto p = migraphx::parse_onnx("averagepool_notset_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
std::vector<float> data_x = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
migraphx::shape s_x{migraphx::shape::float_type, {1, 1, 5, 5}};
......@@ -54,7 +54,7 @@ TEST_CASE(averagepool_notset_test)
TEST_CASE(averagepool_nt_cip_test)
{
auto p = migraphx::parse_onnx("averagepool_nt_cip_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
std::vector<float> data_x = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
migraphx::shape s_x{migraphx::shape::float_type, {1, 1, 5, 5}};
......@@ -72,7 +72,7 @@ TEST_CASE(averagepool_nt_cip_test)
TEST_CASE(batch_norm_flat_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_flat_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape x_shape{migraphx::shape::float_type, {10}};
migraphx::shape c_shape(migraphx::shape::float_type, {1});
......@@ -118,7 +118,7 @@ TEST_CASE(batch_norm_flat_test)
TEST_CASE(batch_norm_rank_2_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_rank_2_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape x_shape{migraphx::shape::float_type, {2, 5}};
migraphx::shape c_shape(migraphx::shape::float_type, {5});
......@@ -155,7 +155,7 @@ TEST_CASE(batch_norm_rank_2_test)
TEST_CASE(batch_norm_1d_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_1d_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape x_shape{migraphx::shape::half_type, {2, 3, 4}};
migraphx::shape c_shape(migraphx::shape::float_type, {3});
......@@ -191,7 +191,7 @@ TEST_CASE(batch_norm_1d_test)
TEST_CASE(batch_norm_2d_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_2d_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape x_shape{migraphx::shape::float_type, {2, 3, 4, 4}};
migraphx::shape c_shape(migraphx::shape::float_type, {3});
......@@ -257,7 +257,7 @@ TEST_CASE(batch_norm_2d_test)
TEST_CASE(batch_norm_3d_test)
{
migraphx::program p = migraphx::parse_onnx("batch_norm_3d_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape x_shape{migraphx::shape::half_type, {2, 2, 2, 2, 2}};
migraphx::shape c_shape(migraphx::shape::half_type, {2});
......@@ -299,7 +299,7 @@ TEST_CASE(batch_norm_3d_test)
TEST_CASE(celu_verify_test)
{
migraphx::program p = migraphx::parse_onnx("celu_verify_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {-5.5, 2.0, 100., 7.0, 0., -1.};
......@@ -321,7 +321,7 @@ TEST_CASE(celu_verify_test)
TEST_CASE(clip_args_type_mismatch)
{
auto p = migraphx::parse_onnx("clip_test_args_type_mismatch.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s_0{migraphx::shape::float_type, {3, 3}};
migraphx::parameter_map pp;
std::vector<float> data_0 = {0.9, 1.2, 1.7, 1.9, 2.2, 2.7, 2.9, 3.2, 3.7};
......@@ -337,7 +337,7 @@ TEST_CASE(clip_args_type_mismatch)
TEST_CASE(depthtospace_simple_test)
{
auto p = migraphx::parse_onnx("depthtospace_simple_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
std::vector<float> data_in(48);
std::iota(std::begin(data_in), std::end(data_in), 0);
migraphx::shape s_x{migraphx::shape::float_type, {1, 8, 2, 3}};
......@@ -355,7 +355,7 @@ TEST_CASE(depthtospace_simple_test)
TEST_CASE(spacetodepth_simple_test)
{
auto p = migraphx::parse_onnx("spacetodepth_simple_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
std::vector<float> data_in(48);
std::iota(std::begin(data_in), std::end(data_in), 0);
migraphx::shape s_x{migraphx::shape::float_type, {1, 2, 4, 6}};
......@@ -374,7 +374,7 @@ TEST_CASE(spacetodepth_depthtospace_test)
{
// space to depth
auto p1 = migraphx::parse_onnx("spacetodepth_simple_test.onnx");
p1.compile(migraphx::ref::target{});
p1.compile(migraphx::make_target("ref"));
std::vector<float> data_in(48);
std::iota(std::begin(data_in), std::end(data_in), 0);
migraphx::shape s_x_1{migraphx::shape::float_type, {1, 2, 4, 6}};
......@@ -383,7 +383,7 @@ TEST_CASE(spacetodepth_depthtospace_test)
auto result1 = p1.eval(pp1).back();
// depth to space
auto p2 = migraphx::parse_onnx("depthtospace_simple_test.onnx");
p2.compile(migraphx::ref::target{});
p2.compile(migraphx::make_target("ref"));
migraphx::parameter_map pp2;
pp2["x"] = result1;
auto result2 = p2.eval(pp2).back();
......@@ -395,7 +395,7 @@ TEST_CASE(spacetodepth_depthtospace_test)
TEST_CASE(eyelike_verify_test)
{
migraphx::program p = migraphx::parse_onnx("eyelike_verify_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {3, 4}};
std::vector<float> data{12, 0};
......@@ -413,7 +413,7 @@ TEST_CASE(eyelike_verify_test)
TEST_CASE(eyelike_verify_negk_test)
{
migraphx::program p = migraphx::parse_onnx("eyelike_verify_negk_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {3, 4}};
std::vector<float> data{12, 0};
......@@ -431,7 +431,7 @@ TEST_CASE(eyelike_verify_negk_test)
TEST_CASE(gather_elements)
{
migraphx::program p = migraphx::parse_onnx("gather_elements_axis0_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s_data{migraphx::shape::float_type, {3, 4}};
std::vector<float> data = {
0.25, 0.75, 0.9375, 0.4375, 0.6875, 0.5625, -0.875, 0.1875, -0.125, 0.5, -0.9375, -0.0625};
......@@ -454,7 +454,7 @@ TEST_CASE(gather_elements)
TEST_CASE(gemm_test)
{
migraphx::program p = migraphx::parse_onnx("gemm_brcst_C_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape a_shape{migraphx::shape::float_type, {5, 6}};
std::vector<float> a_data = {0.26472837, 0.8525864, 0.41929847, 0.14151508, 0.43216065,
......@@ -498,7 +498,7 @@ TEST_CASE(gemm_test)
TEST_CASE(gemm_half_test)
{
migraphx::program p = migraphx::parse_onnx("gemm_half_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape a_shape{migraphx::shape::half_type, {8, 6}};
std::vector<float> tmp = {0.2646, 0.8525, 0.4192, 0.1415, 0.4321, 0.675, 0.4248, 0.8203,
......@@ -542,7 +542,7 @@ TEST_CASE(gemm_half_test)
TEST_CASE(greaterorequal_test)
{
migraphx::program p = migraphx::parse_onnx("greaterorequal_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {3}};
std::vector<float> data1 = {0.25, 0.75, 0.9375};
......@@ -563,7 +563,7 @@ TEST_CASE(greaterorequal_test)
TEST_CASE(hardsigmoid_verify_test)
{
migraphx::program p = migraphx::parse_onnx("hardsigmoid_verify_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {2, 5}};
std::vector<float> data = {-10.0, -2.5, -1.0, -0.5, 0, 1.0, 2.0, 2.5, 2.6, 100.0};
......@@ -587,7 +587,7 @@ TEST_CASE(hardsigmoid_verify_test)
TEST_CASE(if_else_test)
{
migraphx::program p = migraphx::parse_onnx("if_else_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s_data{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
migraphx::shape bool_data{migraphx::shape::bool_type, {1}};
......@@ -609,7 +609,7 @@ TEST_CASE(if_else_test)
TEST_CASE(if_else_test_inlined)
{
migraphx::program p = migraphx::parse_onnx("if_else_test_inlined.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s_data{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
......@@ -628,7 +628,7 @@ TEST_CASE(if_else_test_inlined)
TEST_CASE(if_then_test)
{
migraphx::program p = migraphx::parse_onnx("if_then_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s_data{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
migraphx::shape bool_data{migraphx::shape::bool_type, {1}};
......@@ -651,7 +651,7 @@ TEST_CASE(if_then_test)
TEST_CASE(if_then_test_inlined)
{
migraphx::program p = migraphx::parse_onnx("if_then_test_inlined.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s_data{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
......@@ -671,7 +671,7 @@ TEST_CASE(if_literal_test)
{
auto run_prog = [](bool cond) {
migraphx::program p = migraphx::parse_onnx("if_literal_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s_data{migraphx::shape::bool_type};
std::vector<char> data = {static_cast<char>(cond)};
......@@ -704,7 +704,7 @@ TEST_CASE(if_then_else_multi_output_shapes_inlined_test)
{
migraphx::program p =
migraphx::parse_onnx("if_then_else_multi_output_shapes_inlined_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape x_data{migraphx::shape::float_type, {2, 3, 1}};
migraphx::shape y_data{migraphx::shape::float_type, {2, 3}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
......@@ -733,7 +733,7 @@ TEST_CASE(if_then_else_multi_output_shapes_inlined_test)
TEST_CASE(if_then_else_multi_output_shapes_test)
{
migraphx::program p = migraphx::parse_onnx("if_then_else_multi_output_shapes_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s_data{migraphx::shape::float_type, {2, 3, 1}};
std::vector<float> data = {0.0625, 0.75, -0.0625, 0.125, -0.125, -0.5625};
migraphx::shape bool_data{migraphx::shape::bool_type, {1}};
......@@ -765,7 +765,7 @@ TEST_CASE(if_pl_test)
{
auto run_prog = [](bool cond) {
migraphx::program p = migraphx::parse_onnx("if_pl_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape xs{migraphx::shape::float_type, {2, 3}};
migraphx::shape ys{migraphx::shape::float_type, {3, 3}};
migraphx::shape cond_s{migraphx::shape::bool_type};
......@@ -805,7 +805,7 @@ TEST_CASE(if_tuple_test)
{
auto run_prog = [](bool cond) {
migraphx::program p = migraphx::parse_onnx("if_tuple_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape xs{migraphx::shape::float_type, {1, 4}};
migraphx::shape ys{migraphx::shape::float_type, {3, 4}};
migraphx::shape cond_s{migraphx::shape::bool_type};
......@@ -854,7 +854,7 @@ TEST_CASE(instance_norm_test)
{
migraphx::program p = migraphx::parse_onnx("instance_norm_val_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
std::vector<float> result_vector(9);
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
......@@ -884,7 +884,7 @@ TEST_CASE(instance_norm_3d_test)
{
migraphx::program p = migraphx::parse_onnx("instance_norm_val_3d_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
std::vector<float> result_vector(16);
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
......@@ -912,7 +912,7 @@ TEST_CASE(instance_norm_3d_test)
TEST_CASE(lessorequal_test)
{
migraphx::program p = migraphx::parse_onnx("lessorequal_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {3}};
std::vector<float> data1 = {0.25, 0.75, 0.9375};
......@@ -933,7 +933,7 @@ TEST_CASE(lessorequal_test)
TEST_CASE(lpnormalization_1norm)
{
migraphx::program p = migraphx::parse_onnx("lpnormalization_l1_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {3, 4}};
std::vector<float> data{0.f, 2.f, -2.f, 1.f, 1.f, -5.f, 3.f, -1.f, -4.f, 3.f, 0.f, 0.f};
migraphx::parameter_map pp;
......@@ -961,7 +961,7 @@ TEST_CASE(lpnormalization_1norm)
TEST_CASE(lpnormalization_2norm)
{
migraphx::program p = migraphx::parse_onnx("lpnormalization_l2_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {3, 4}};
std::vector<float> data{0.f, 2.f, -2.f, 1.f, 1.f, -5.f, 3.f, -1.f, -4.f, 3.f, 0.f, 0.f};
migraphx::parameter_map pp;
......@@ -989,7 +989,7 @@ TEST_CASE(lpnormalization_2norm)
TEST_CASE(mean_broadcast_test)
{
migraphx::program p = migraphx::parse_onnx("mean_broadcast_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s0{migraphx::shape::float_type, {1, 3, 4}};
std::vector<float> data0(12, 1);
......@@ -1020,7 +1020,7 @@ TEST_CASE(mean_broadcast_test)
TEST_CASE(mean_test)
{
migraphx::program p = migraphx::parse_onnx("mean_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::double_type, {2, 2, 2}};
const int num_elms = 8;
......@@ -1047,7 +1047,7 @@ TEST_CASE(mean_test)
TEST_CASE(mean_integral_test)
{
migraphx::program p = migraphx::parse_onnx("mean_integral_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::int32_type, {2, 2, 2}};
const int num_elms = 8;
......@@ -1074,7 +1074,7 @@ TEST_CASE(mean_integral_test)
TEST_CASE(mod_test)
{
migraphx::program p = migraphx::parse_onnx("mod_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::int32_type, {3, 3, 3}};
......@@ -1101,7 +1101,7 @@ TEST_CASE(mod_test)
TEST_CASE(mod_test_different_types)
{
migraphx::program p = migraphx::parse_onnx("mod_test_different_dtypes.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s_int16{migraphx::shape::int16_type, {3, 3, 3}};
migraphx::shape s_int32{migraphx::shape::int32_type, {3, 3, 3}};
......@@ -1129,7 +1129,7 @@ TEST_CASE(mod_test_different_types)
TEST_CASE(mod_test_fmod)
{
migraphx::program p = migraphx::parse_onnx("mod_test_fmod.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {3, 3, 3}};
......@@ -1158,7 +1158,7 @@ TEST_CASE(mod_test_fmod)
TEST_CASE(mod_test_fmod_different_types)
{
migraphx::program p = migraphx::parse_onnx("mod_test_fmod_different_dtypes.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s_float{migraphx::shape::float_type, {3, 3, 3}};
migraphx::shape s_int{migraphx::shape::int32_type, {3, 3, 3}};
......@@ -1188,7 +1188,7 @@ TEST_CASE(mod_test_fmod_different_types)
TEST_CASE(nonzero_test)
{
migraphx::program p = migraphx::parse_onnx("nonzero_dynamic_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::bool_type, {2, 2}};
std::vector<char> data = {1, 1, 1, 0};
......@@ -1207,7 +1207,7 @@ TEST_CASE(nonzero_test)
TEST_CASE(resize_downsample_f_test)
{
migraphx::program p = migraphx::parse_onnx("resize_downsample_f_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::float_type, {1, 1, 2, 4}};
std::vector<float> dx(sx.elements());
......@@ -1228,7 +1228,7 @@ TEST_CASE(resize_downsample_f_test)
TEST_CASE(resize_upsample_linear_ac_test)
{
migraphx::program p = migraphx::parse_onnx("resize_upsample_linear_ac_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::float_type, {1, 1, 2, 2}};
std::vector<float> dx = {1.0f, 2.0f, 3.0f, 4.0f};
......@@ -1263,7 +1263,7 @@ TEST_CASE(resize_upsample_linear_ac_test)
TEST_CASE(resize_upsample_linear_test)
{
migraphx::program p = migraphx::parse_onnx("resize_upsample_linear_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::float_type, {1, 1, 2, 2}};
std::vector<float> dx = {1.0f, 2.0f, 3.0f, 4.0f};
......@@ -1284,7 +1284,7 @@ TEST_CASE(resize_upsample_linear_test)
TEST_CASE(resize_upsample_pf_test)
{
migraphx::program p = migraphx::parse_onnx("resize_upsample_pf_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::float_type, {1, 1, 2, 2}};
std::vector<float> dx = {1.0f, 2.0f, 3.0f, 4.0f};
......@@ -1305,7 +1305,7 @@ TEST_CASE(resize_upsample_pf_test)
TEST_CASE(reversesequence_4D_verify_test)
{
migraphx::program p = migraphx::parse_onnx("reversesequence_4D_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape xs{migraphx::shape::float_type, {2, 2, 2, 2}};
std::vector<float> x_data = {
......@@ -1326,7 +1326,7 @@ TEST_CASE(reversesequence_4D_verify_test)
TEST_CASE(reversesequence_batch_verify_test)
{
migraphx::program p = migraphx::parse_onnx("reversesequence_batch_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape xs{migraphx::shape::float_type, {4, 4}};
std::vector<float> x_data = {
......@@ -1347,7 +1347,7 @@ TEST_CASE(reversesequence_batch_verify_test)
TEST_CASE(reversesequence_time_verify_test)
{
migraphx::program p = migraphx::parse_onnx("reversesequence_time_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape xs{migraphx::shape::float_type, {4, 4}};
std::vector<float> x_data = {
......@@ -1368,7 +1368,7 @@ TEST_CASE(reversesequence_time_verify_test)
TEST_CASE(selu_test)
{
migraphx::program p = migraphx::parse_onnx("selu_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape xs{migraphx::shape::double_type, {2, 3}};
std::vector<double> x_data = {1.1, 2.1, 0.0, -1.3, -5.3, 12.0};
......@@ -1388,7 +1388,7 @@ TEST_CASE(selu_test)
TEST_CASE(size_verify_test)
{
migraphx::program p = migraphx::parse_onnx("size_verify_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {2, 5, 3}};
std::vector<float> data(30, 1.);
......@@ -1403,7 +1403,7 @@ TEST_CASE(size_verify_test)
TEST_CASE(slice_test)
{
migraphx::program p = migraphx::parse_onnx("slice_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape sh_data{migraphx::shape::float_type, {3, 2}};
std::vector<float> data = {0, 1, 2, 3, 4, 5};
......@@ -1422,7 +1422,7 @@ TEST_CASE(slice_test)
TEST_CASE(slice_5arg_test)
{
migraphx::program p = migraphx::parse_onnx("slice_5arg_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape sh_data{migraphx::shape::float_type, {5, 5}}; // start
std::vector<float> data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
......@@ -1442,7 +1442,7 @@ TEST_CASE(slice_5arg_test)
TEST_CASE(slice_reverse_test)
{
migraphx::program p = migraphx::parse_onnx("slice_5arg_reverse_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape sh_data{migraphx::shape::float_type, {5, 5}}; // start
std::vector<float> data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
......@@ -1462,7 +1462,7 @@ TEST_CASE(slice_reverse_test)
TEST_CASE(slice_step_test)
{
migraphx::program p = migraphx::parse_onnx("slice_5arg_step_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape sh_data{migraphx::shape::float_type, {5, 5}}; // start
std::vector<float> data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
......@@ -1482,7 +1482,7 @@ TEST_CASE(slice_step_test)
TEST_CASE(softplus_test)
{
migraphx::program p = migraphx::parse_onnx("softplus_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {5}};
std::vector<float> data = {0, 1, 2, 3, 4};
......@@ -1503,7 +1503,7 @@ TEST_CASE(softplus_test)
TEST_CASE(softsign_test)
{
migraphx::program p = migraphx::parse_onnx("softsign_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {5}};
std::vector<float> data = {0, 1, 2, 3, 4};
......@@ -1543,7 +1543,7 @@ TEST_CASE(upsample_test)
TEST_CASE(where_test)
{
migraphx::program p = migraphx::parse_onnx("where_test.onnx");
p.compile(migraphx::ref::target{});
p.compile(migraphx::make_target("ref"));
migraphx::shape c_shape{migraphx::shape::bool_type, {2}};
std::vector<int8_t> c_data = {1, 0};
......