"git@developer.sourcefind.cn:gaoqiong/migraphx.git" did not exist on "9a1a92f535108e73891b43ba4738d118f5322205"
Unverified commit 2ba401f0 authored by Ted Themistokleous, committed by GitHub

Merge branch 'simplify_1_mul_div_ops' into divide_by_zero_check

parents a330d428 8398fb19
...@@ -32,6 +32,17 @@ function(add_api_test TEST_NAME TEST_SRC TEST_DIR)
add_dependencies(check ${NAME})
endfunction()
# Workaround: C files don't work with clang-tidy right now; needs a fix in rocm-cmake
function(add_c_api_test TEST_NAME TEST_SRC TEST_DIR)
set(NAME test_api_${TEST_NAME})
add_executable(${NAME} EXCLUDE_FROM_ALL ${TEST_SRC})
target_link_libraries(${NAME} migraphx_c migraphx)
target_include_directories(${NAME} PUBLIC ../include)
add_test(NAME ${NAME} COMMAND $<TARGET_FILE:${NAME}> WORKING_DIRECTORY ${TEST_DIR})
add_dependencies(tests ${NAME})
add_dependencies(check ${NAME})
endfunction()
add_api_test(array_base test_array_base.cpp ${TEST_ONNX_DIR})
add_api_test(assign test_assign.cpp ${TEST_ONNX_DIR})
add_api_test(compile_options test_compile_options.cpp ${TEST_ONNX_DIR})
...@@ -40,6 +51,7 @@ add_api_test(module_construct test_module_construct.cpp ${TEST_ONNX_DIR})
add_api_test(ref test_cpu.cpp ${TEST_ONNX_DIR})
add_api_test(save_load test_save_load.cpp ${TEST_ONNX_DIR})
add_api_test(op test_op_construct.cpp ${TEST_ONNX_DIR})
add_c_api_test(c_op test_c_op_construct.c ${TEST_ONNX_DIR})
add_api_test(custom_op test_custom_op.cpp ${TEST_ONNX_DIR})
add_api_test(tf_parser test_tf_parser.cpp ${TEST_TF_DIR})
# GPU-based tests
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/migraphx.h>
#include <stdlib.h>
#include <string.h>
void expect_equal(const char* x, const char* y)
{
if(strcmp(x, y) != 0)
abort();
}
int main()
{
char name[1024];
migraphx_operation_t op;
migraphx_operation_create(&op, "add", 0);
migraphx_operation_name(name, 1024, op);
migraphx_operation_destroy(op);
expect_equal(name, "add");
}
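
The test above ignores the migraphx_status codes returned by the C API calls. A minimal sketch of the same round-trip with explicit status checking, assuming only that each call returns migraphx_status and that migraphx_status_success indicates success (as in the try_() implementation later in this diff); this is illustrative only and not part of the commit:

#include <migraphx/migraphx.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Abort with a diagnostic if a C API call reports a non-success status
static void check_status(migraphx_status s, const char* what)
{
    if(s != migraphx_status_success)
    {
        fprintf(stderr, "%s failed with status %d\n", what, (int)s);
        abort();
    }
}

int main()
{
    char name[1024];
    migraphx_operation_t op;
    check_status(migraphx_operation_create(&op, "add", 0), "migraphx_operation_create");
    check_status(migraphx_operation_name(name, 1024, op), "migraphx_operation_name");
    check_status(migraphx_operation_destroy(op), "migraphx_operation_destroy");
    if(strcmp(name, "add") != 0)
        abort();
    return 0;
}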
...@@ -23,8 +23,10 @@
*/
#include <algorithm>
#include <cmath>
#include <exception>
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include <stdexcept>
#include "test.hpp"

struct sigmoid_custom_op final : migraphx::experimental_custom_op_base
...@@ -43,10 +45,22 @@ struct sigmoid_custom_op final : migraphx::experimental_custom_op_base
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(inputs.size() != 2)
{
throw std::runtime_error("op must have two inputs");
}
if(inputs[0].lengths().size() != 1)
{
throw std::runtime_error("input arg must be a vector or scalar");
}
if(inputs[0].type() != migraphx_shape_float_type)
{
throw std::runtime_error("input arg must be of type float");
}
if(inputs[0] != inputs[1])
{
throw std::runtime_error("input arg and buffer allocation must be of same shape");
}
return inputs.back();
}
};
...@@ -83,4 +97,18 @@ TEST_CASE(run_sigmoid_custom_op)
EXPECT(bool{result == migraphx::argument(s, expected_result.data())});
}
extern "C" void migraphx_test_private_disable_exception_catch(bool b);
TEST_CASE(run_sigmoid_with_incorrect_shape)
{
migraphx::program p;
migraphx::shape s{migraphx_shape_float_type, {12}};
migraphx::module m = p.get_main_module();
auto x = m.add_parameter("x", s);
migraphx_test_private_disable_exception_catch(true);
EXPECT(test::throws<std::exception>(
[&] { m.add_instruction(migraphx::operation("sigmoid_custom_op"), {x}); },
"Error in compute_shape of: sigmoid_custom_op: op must have two inputs"));
}
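// What this verifies: the exception thrown in compute_shape is carried across the
// C API boundary via the hidden exception_msg buffer introduced in the api.py
// changes below, and the generated C++ wrapper rethrows it as
// "Error in compute_shape of: <object type>: <message>".
// migraphx_test_private_disable_exception_catch(true) keeps try_() in api.cpp from
// converting the rethrown error back into a status code, so test::throws can see it.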
int main(int argc, const char* argv[]) { test::run(argc, argv); }
...@@ -24,6 +24,7 @@
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include <stdexcept>
#include "test.hpp"
#define MIGRAPHX_HIP_ASSERT(x) (EXPECT(x == hipSuccess))
...@@ -54,6 +55,14 @@ struct simple_custom_op final : migraphx::experimental_custom_op_base
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(!inputs[0].standard())
{
throw std::runtime_error("first arg must be standard shaped");
}
if(inputs.size() != 2)
{
throw std::runtime_error("number of inputs must be 2");
}
return inputs.back();
}
};
...@@ -64,12 +73,17 @@ TEST_CASE(run_simple_custom_op)
migraphx::register_experimental_custom_op(simple_op);
migraphx::program p;
migraphx::shape s{migraphx_shape_int32_type, {4, 3}};
migraphx::shape trans_shape{migraphx_shape_int32_type, {3, 4}};
migraphx::module m = p.get_main_module();
auto x = m.add_parameter("x", s);
auto neg = m.add_instruction(migraphx::operation("neg"), x);
auto alloc = m.add_allocation(trans_shape);
auto neg_trans = m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg});
auto neg_cont = m.add_instruction(migraphx::operation("contiguous"), {neg_trans});
auto custom_kernel = m.add_instruction(migraphx::operation("simple_custom_op"), {neg_cont, alloc});
auto relu = m.add_instruction(migraphx::operation("relu"), custom_kernel);
m.add_return({relu});
migraphx::compile_options options;
options.set_offload_copy();
...@@ -82,7 +96,7 @@ TEST_CASE(run_simple_custom_op)
auto result_vec = result.as_vector<int>();
std::vector<int> expected_result(12, 0);
std::fill(expected_result.begin() + 6, expected_result.end(), 3);
EXPECT(bool{result == migraphx::argument(trans_shape, expected_result.data())});
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "test.hpp"
#include <migraphx/check_shapes.hpp>
#include <migraphx/make_op.hpp>
/*!
* Tests for check_shapes object handling dynamic shapes
*/
using migraphx::shape;
bool create_shapes(bool dynamic_allowed)
{
try
{
shape a{shape::int64_type, {3}};
shape b{shape::float_type, {{3, 6, 0}, {4, 4, 0}}};
auto op = migraphx::make_op("add");
migraphx::check_shapes{{a, b}, op, dynamic_allowed}.has(2);
return true;
}
catch(...)
{
return false;
}
}
TEST_CASE(allow_dynamic_shape) { EXPECT(create_shapes(true)); }
TEST_CASE(fail_dynamic_shape) { EXPECT(!create_shapes(false)); }
int main(int argc, const char* argv[]) { test::run(argc, argv); }
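
For context, a minimal sketch of how an operator could opt in to dynamic shapes with this flag. This assumes the three-argument check_shapes constructor exercised above also accepts the usual operator object passed as *this; the operator my_identity is hypothetical and not part of this change:

#include <migraphx/check_shapes.hpp>
#include <migraphx/shape.hpp>
#include <string>
#include <vector>

struct my_identity
{
    std::string name() const { return "my_identity"; }
    migraphx::shape compute_shape(std::vector<migraphx::shape> inputs) const
    {
        // Passing true as the third argument allows dynamic input shapes,
        // mirroring the dynamic_allowed flag in the test above.
        migraphx::check_shapes{inputs, *this, true}.has(1);
        return inputs.front();
    }
};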
...@@ -205,4 +205,24 @@ TEST_CASE(contiguous_pointwise)
mm->begin(), mm->end(), [](auto&& ins) { return ins.name() == "contiguous"; }));
}
TEST_CASE(slice_contiguous)
{
migraphx::module m;
migraphx::shape s{migraphx::shape::float_type, {4, 2}};
auto x = m.add_parameter("x", s);
auto t = m.add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), x);
auto c = m.add_instruction(migraphx::make_op("contiguous"), t);
auto s1 = m.add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {1}}}), c);
auto s2 = m.add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {2}}}), c);
auto c1 = m.add_instruction(migraphx::make_op("contiguous"), s1);
auto c2 = m.add_instruction(migraphx::make_op("contiguous"), s2);
m.add_instruction(pass_standard_op{}, c1, c2);
run_pass(m);
EXPECT(std::count_if(
m.begin(), m.end(), [](auto&& ins) { return ins.name() == "contiguous"; }) == 1);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "test.hpp"
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/fpga/target.hpp>
#include <migraphx/target_assignments.hpp>
migraphx::program create_program()
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {3}};
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto z = mm->add_parameter("z", s);
auto sum = mm->add_instruction(migraphx::make_op("add"), x, y);
auto sum_2 = mm->add_instruction(migraphx::make_op("add"), sum, z);
mm->add_return({sum_2});
return p;
}
TEST_CASE(compile)
{
auto p = create_program();
auto t = migraphx::make_target("fpga");
p.compile(t);
EXPECT(p.is_compiled());
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "test.hpp"
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/target_assignments.hpp>
migraphx::program create_program()
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {3}};
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto z = mm->add_parameter("z", s);
auto diff = mm->add_instruction(migraphx::make_op("div"), x, y);
mm->add_instruction(migraphx::make_op("div"), diff, z);
return p;
}
TEST_CASE(is_supported)
{
auto p = create_program();
auto targets = migraphx::get_targets();
EXPECT(!targets.empty());
auto first_target = targets[0];
auto t = migraphx::make_target(first_target);
const auto assignments = p.get_target_assignments({t});
for(const auto& [ins, target] : assignments)
{
(void)ins;
EXPECT(target == first_target);
}
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/mlir.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/write_literals.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/module.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/verify_args.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/functional.hpp>
#include <test.hpp>
using migraphx::trim;
// m test_gpu_mlir && ./bin/test_gpu_mlir
struct mlir_gpu_target : migraphx::gpu::target
{
std::string name() const { return "mlir"; }
std::vector<migraphx::pass> get_passes(migraphx::context& gctx,
const migraphx::compile_options&) const
{
auto& ctx = migraphx::any_cast<migraphx::gpu::context>(gctx);
return {migraphx::gpu::write_literals{&ctx}};
}
};
std::string encode(const std::string& s)
{
std::stringstream ss;
bool prespace = false;
for(auto c : s)
{
if(std::isspace(c) != 0)
{
if(not prespace)
ss << " ";
prespace = true;
}
else if(std::isprint(c) != 0)
{
ss << c;
prespace = false;
}
}
return migraphx::trim(ss.str());
}
migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto names = mmlir.get_parameter_names();
std::vector<migraphx::instruction_ref> inputs;
std::transform(names.begin(), names.end(), std::back_inserter(inputs), [&](const auto& name) {
return mm->add_parameter(name, mmlir.get_parameter_shape(name));
});
std::sort(inputs.begin(), inputs.end(), migraphx::by(std::less<>{}, [](auto ins) {
return to_string(ins->get_operator());
}));
inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));
migraphx::gpu::context ctx;
migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir), inputs);
return p;
}
migraphx::parameter_map generate_params(const migraphx::program& p)
{
migraphx::parameter_map m;
std::size_t i = 0;
for(auto&& x : p.get_parameter_shapes())
{
// m[x.first] = migraphx::fill_argument(x.second, 1);
m[x.first] = migraphx::generate_argument(x.second, i++);
}
return m;
}
migraphx::argument run_gpu(migraphx::program p, const migraphx::parameter_map& inputs)
{
mlir_gpu_target t;
p.compile(t);
migraphx::parameter_map m;
for(auto&& input : inputs)
{
m[input.first] = t.copy_to(input.second);
}
for(auto&& x : p.get_parameter_shapes())
{
if(m.count(x.first) == 0)
{
m[x.first] = t.allocate(x.second);
}
}
return t.copy_from(p.eval(m).front());
}
migraphx::argument run_ref(migraphx::program p, const migraphx::parameter_map& inputs)
{
p.compile(migraphx::ref::target{});
return p.eval(inputs).front();
}
bool verify_mlir(const migraphx::module& mmlir)
{
migraphx::program ref;
ref.get_main_module()->insert_instructions(ref.get_main_module()->end(), &mmlir);
auto inputs = generate_params(ref);
auto mlir = create_program_from_mlir(mmlir);
return migraphx::verify_args("mlir", run_ref(ref, inputs), run_gpu(mlir, inputs));
}
TEST_CASE(conv)
{
const std::string mlir_output = R"__migraphx__(
module {
func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
return %0 : tensor<1x2x2x2xf32>
}
}
)__migraphx__";
migraphx::module m;
auto x = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
auto w = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
m.add_return({conv});
auto s = migraphx::gpu::dump_mlir(m);
// Skip test if MLIR is not enabled
if(s.empty())
return;
CHECK(encode(s) == encode(mlir_output));
EXPECT(verify_mlir(m));
}
TEST_CASE(conv_add_relu)
{
const std::string mlir_output = R"__migraphx__(
module {
func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
return %2 : tensor<1x2x2x2xf32>
}
}
)__migraphx__";
migraphx::module m;
auto x = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
auto w = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
auto b = m.add_parameter("b", {migraphx::shape::float_type, {1, 2, 2, 2}});
auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
auto add = m.add_instruction(migraphx::make_op("add"), conv, b);
auto relu = m.add_instruction(migraphx::make_op("relu"), add);
m.add_return({relu});
auto s = migraphx::gpu::dump_mlir(m);
// Skip test if MLIR is not enabled
if(s.empty())
return;
CHECK(encode(s) == encode(mlir_output));
EXPECT(verify_mlir(m));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
...@@ -3086,7 +3086,7 @@ def max_test():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
c = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])

node = onnx.helper.make_node(
'Max',
...@@ -3220,7 +3220,7 @@ def min_test():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
c = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])

node = onnx.helper.make_node(
'Min',
......
[Binary diffs of the regenerated max_test.onnx and min_test.onnx files (graphs renamed to max_test/min_test, output tensor '2' renamed to '3'); protobuf contents are not human-readable.]
...@@ -2832,7 +2832,9 @@ TEST_CASE(max_test)
auto l0 = mm->add_instruction(migraphx::make_op("max"), input0, input1);
mm->add_instruction(migraphx::make_op("max"), l0, input2);

auto prog = optimize_onnx("max_test.onnx");
EXPECT(p == prog);
}

TEST_CASE(maxpool_notset_test)
...@@ -2947,7 +2949,9 @@ TEST_CASE(min_test)
auto l0 = mm->add_instruction(migraphx::make_op("min"), input0, input1);
mm->add_instruction(migraphx::make_op("min"), l0, input2);

auto prog = optimize_onnx("min_test.onnx");
EXPECT(p == prog);
}

TEST_CASE(multinomial_test)
...@@ -5433,7 +5437,59 @@ TEST_CASE(variable_batch_test)
EXPECT(p == prog);
}
TEST_CASE(variable_batch_user_input_test1)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 16, 16}});
auto r = mm->add_instruction(migraphx::make_op("identity"), l0);
mm->add_return({r});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 2, 0};
auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(variable_batch_user_input_test2)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
auto r = mm->add_instruction(migraphx::make_op("identity"), l0);
mm->add_return({r});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 5, 0};
auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(variable_batch_user_input_test3)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
auto r = mm->add_instruction(migraphx::make_op("identity"), l0);
mm->add_return({r});
migraphx::onnx_options options;
options.map_dyn_input_dims["0"] = {{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}};
auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(variable_batch_user_input_test4)
{
migraphx::program p;
auto* mm = p.get_main_module();
...@@ -5449,6 +5505,26 @@ TEST_CASE(variable_batch_user_input_test)
EXPECT(p == prog);
}
TEST_CASE(variable_batch_user_input_test5)
{
// Error using default_dim_value and default_dyn_dim_value
migraphx::onnx_options options;
options.default_dim_value = 2;
options.default_dyn_dim_value = {1, 2, 0};
EXPECT(test::throws([&] { migraphx::parse_onnx("variable_batch_test.onnx", options); }));
}
TEST_CASE(variable_batch_user_input_test6)
{
// Error using both map_dyn_input_dims and map_input_dims
migraphx::onnx_options options;
options.map_dyn_input_dims["0"] = {{2, 5, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}};
options.map_input_dims["0"] = {2, 3, 16, 16};
EXPECT(test::throws([&] { migraphx::parse_onnx("variable_batch_test.onnx", options); }));
}
TEST_CASE(variable_batch_leq_zero_test)
{
migraphx::program p;
......
...@@ -981,7 +981,8 @@ TEST_CASE(multibroadcast)
}
{
std::vector<std::size_t> lens{4, 1, 3};
std::vector<std::size_t> empt = {};
migraphx::shape input{migraphx::shape::float_type, empt};
throws_shape(migraphx::make_op("multibroadcast", {{"out_lens", lens}}), input);
}
{
...@@ -1533,15 +1534,46 @@ TEST_CASE(test_squeeze_wrong_axis)
TEST_CASE(test_unsqueeze)
{
migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
migraphx::shape s2{migraphx::shape::float_type, {4, 5, 1, 3}};
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_step)
{
migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
migraphx::shape s2{migraphx::shape::float_type, {4, 5, 2, 6}};
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_step_non_divisable)
{
migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_step_zero)
{
migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {0}}}), s1);
}
TEST_CASE(test_unsqueeze_step_at_end)
{
migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
throws_shape(migraphx::make_op("unsqueeze", {{"axes", {3}}, {"steps", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_mismatch_step_axis)
{
migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2, 3}}}), s1);
}
TEST_CASE(test_unsqueeze_negative_axis)
{
migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
migraphx::shape s2{migraphx::shape::float_type, {4, 5, 1, 3}};
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {-2}}}), s1);
}
...@@ -1567,21 +1599,28 @@ TEST_CASE(test_unsqueeze_scalar_tensor2)
TEST_CASE(test_unsqueeze_transpose)
{
migraphx::shape s1{migraphx::shape::float_type, {4, 4, 3}, {12, 1, 4}};
migraphx::shape s2{migraphx::shape::float_type, {4, 4, 1, 3}, {12, 1, 12, 4}};
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_transpose_step)
{
migraphx::shape s1{migraphx::shape::float_type, {4, 4, 6}, {24, 1, 4}};
migraphx::shape s2{migraphx::shape::float_type, {4, 4, 2, 3}, {24, 1, 12, 4}};
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_multibroadcast)
{
migraphx::shape s1{migraphx::shape::float_type, {2, 3, 4}, {0, 1, 0}};
migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {0, 1, 0, 0}};
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
}

TEST_CASE(test_unsqueeze_slice)
{
migraphx::shape s1{migraphx::shape::float_type, {2, 3, 4}, {108, 36, 1}};
migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {108, 36, 4, 1}};
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
}
...@@ -1613,6 +1652,27 @@ TEST_CASE(test_unsqueeze_multiple_axes_2)
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {0, 1}}}), s1);
}
TEST_CASE(test_unsqueeze_multiple_axes_3)
{
migraphx::shape s1{migraphx::shape::float_type, {3, 4, 5}};
migraphx::shape s2{migraphx::shape::float_type, {3, 4, 1, 5, 1, 1}};
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2, 4, 5}}}), s1);
}
TEST_CASE(test_unsqueeze_multiple_axes_4)
{
migraphx::shape s1{migraphx::shape::float_type, {3, 4, 5}};
migraphx::shape s2{migraphx::shape::float_type, {3, 4, 1, 5, 1, 1}};
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {5, 4, 2}}}), s1);
}
TEST_CASE(test_unsqueeze_multiple_axes_step)
{
migraphx::shape s1{migraphx::shape::float_type, {3, 4, 10}};
migraphx::shape s2{migraphx::shape::float_type, {3, 4, 2, 5, 1, 1}};
expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2, 4, 5}}, {"steps", {2}}}), s1);
}
TEST_CASE(transpose_shape)
{
migraphx::shape input{migraphx::shape::float_type, {2, 2}};
......
...@@ -38,7 +38,6 @@ TEST_CASE(test_shape_default)
EXPECT(s.elements() == 0);
EXPECT(s.bytes() == 0);
}
TEST_CASE(test_shape_assign)
{
migraphx::shape s1{migraphx::shape::float_type, {100, 32, 8, 8}};
...@@ -65,6 +64,118 @@ TEST_CASE(test_shape_standard)
EXPECT(not s.broadcasted());
}
TEST_CASE(test_shape_min_max_opt)
{
migraphx::shape s{migraphx::shape::float_type, {2, 2, 3}, {6, 3, 1}};
EXPECT(s.min_lens() == s.lens());
EXPECT(s.max_lens() == s.lens());
EXPECT(s.opt_lens() == s.lens());
}
TEST_CASE(test_shape_dynamic_fixed)
{
migraphx::shape s{migraphx::shape::float_type, {{2, 2, 0}, {2, 2, 0}, {3, 3, 0}}};
EXPECT(not s.standard());
EXPECT(not s.packed());
EXPECT(not s.transposed());
EXPECT(not s.broadcasted());
EXPECT(s.dynamic());
EXPECT(s.dyn_dims().size() == 3);
EXPECT(s.dyn_dims().at(0).is_fixed());
EXPECT(not s.dyn_dims().at(0).has_optimal());
EXPECT(s.min_lens() == std::vector<std::size_t>{2, 2, 3});
EXPECT(s.max_lens() == std::vector<std::size_t>{2, 2, 3});
EXPECT(s.opt_lens() == std::vector<std::size_t>{0, 0, 0});
EXPECT(s.bytes() == 2 * 2 * 3 * sizeof(float));
}
TEST_CASE(test_shape_dynamic_not_fixed)
{
using migraphx::shape;
std::vector<shape::dynamic_dimension> dims = {};
dims.push_back(shape::dynamic_dimension{2, 5, 2});
dims.push_back(shape::dynamic_dimension{2, 8, 0});
migraphx::shape s{migraphx::shape::float_type, dims};
EXPECT(not s.standard());
EXPECT(not s.packed());
EXPECT(not s.transposed());
EXPECT(not s.broadcasted());
EXPECT(s.dynamic());
EXPECT(s.dyn_dims().size() == 2);
EXPECT(not s.dyn_dims().at(0).is_fixed());
EXPECT(s.dyn_dims().at(0).has_optimal());
EXPECT(s.min_lens() == std::vector<std::size_t>{2, 2});
EXPECT(s.max_lens() == std::vector<std::size_t>{5, 8});
EXPECT(s.opt_lens() == std::vector<std::size_t>{2, 0});
EXPECT(s.bytes() == 5 * 8 * sizeof(float));
}
TEST_CASE(test_shape_dynamic_compares)
{
using migraphx::shape;
auto a = shape::dynamic_dimension{2, 5, 2};
auto b = a;
auto c = shape::dynamic_dimension{2, 5, 2};
auto d = shape::dynamic_dimension{3, 8, 4};
EXPECT(a == b);
EXPECT(a == c);
EXPECT(a != d);
migraphx::shape s0{shape::float_type, {a, d}};
migraphx::shape s1 = s0;
migraphx::shape s2{shape::float_type, {a, d}};
migraphx::shape s3{shape::int32_type, {a}};
EXPECT(s0 == s1);
EXPECT(s0 == s2);
EXPECT(s0 != s3);
std::stringstream ss0;
std::stringstream ss1;
std::stringstream ss3;
ss0 << s0;
ss1 << s1;
ss3 << s3;
EXPECT(ss0.str() == ss1.str());
EXPECT(ss0.str() != ss3.str());
}
TEST_CASE(test_shape_dynamic_errors)
{
using migraphx::shape;
std::vector<shape::dynamic_dimension> dims = {};
dims.push_back(shape::dynamic_dimension{2, 5, 2});
dims.push_back(shape::dynamic_dimension{2, 8, 0});
migraphx::shape s{shape::float_type, dims};
EXPECT(test::throws([&] { s.elements(); }));
EXPECT(test::throws([&] { s.index({0, 1}); }));
EXPECT(test::throws([&] { s.index(1); }));
EXPECT(test::throws([&] { s.index(std::vector<std::size_t>{0, 1}); }));
EXPECT(test::throws([&] { s.with_lens({3, 5}); }));
EXPECT(test::throws([&] { s.with_lens(shape::float_type, {3, 5}); }));
}
TEST_CASE(test_shape_dynamic_serialize)
{
using migraphx::shape;
std::vector<shape::dynamic_dimension> dims1 = {};
dims1.push_back(shape::dynamic_dimension{2, 5, 2});
dims1.push_back(shape::dynamic_dimension{2, 8, 0});
migraphx::shape s1{shape::float_type, dims1};
auto v1 = migraphx::to_value(s1);
std::vector<shape::dynamic_dimension> dims2 = {};
dims2.push_back(shape::dynamic_dimension{2, 5, 2});
migraphx::shape s2{shape::uint64_type, dims2};
auto v2 = migraphx::to_value(s2);
EXPECT(v1 != v2);
auto s3 = migraphx::from_value<shape>(v1);
EXPECT(s3 == s1);
auto s4 = migraphx::from_value<shape>(v2);
EXPECT(s4 == s2);
EXPECT(s3 != s4);
}
TEST_CASE(test_shape_packed)
{
migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}};
......
...@@ -1141,4 +1141,138 @@ TEST_CASE(transpose_contiguous_reshape_binary_broadcast)
EXPECT(m1 == m2);
}
TEST_CASE(transpose_unsqueeze_concat)
{
migraphx::module m1;
{
auto l0 = m1.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
auto lt0 =
m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), l0);
auto l1 = m1.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
auto lt1 =
m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), l1);
auto l2 = m1.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
auto lt2 =
m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), l2);
std::vector<migraphx::instruction_ref> args{lt0, lt1, lt2};
std::vector<migraphx::instruction_ref> unsqueezed_args;
int64_t axis = 3;
std::transform(
args.begin(),
args.end(),
std::back_inserter(unsqueezed_args),
[&](migraphx::instruction_ref arg) {
return m1.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {axis}}}), arg);
});
m1.add_instruction(migraphx::make_op("concat", {{"axis", axis}}), unsqueezed_args);
}
// TODO: This could be simplified to a single transpose after concat
migraphx::module m2 = m1;
run_pass(m1);
EXPECT(m1 == m2);
}
TEST_CASE(transpose_slice)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
auto slice1 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
auto transpose1 = m1.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice1);
auto slice2 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
auto transpose2 = m1.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice2);
auto slice3 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
auto transpose3 = m1.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice3);
m1.add_return({transpose1, transpose2, transpose3});
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
auto transpose =
m2.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), x);
auto slice1 = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {12}}}),
transpose);
auto slice2 = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {12}}, {"ends", {24}}}),
transpose);
auto slice3 = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {24}}, {"ends", {36}}}),
transpose);
m2.add_return({slice1, slice2, slice3});
}
EXPECT(m1 == m2);
}
TEST_CASE(transpose_slice_diff_perm)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
auto slice1 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
auto transpose1 = m1.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice1);
auto slice2 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
auto transpose2 = m1.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), slice2);
auto slice3 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
auto transpose3 = m1.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice3);
m1.add_return({transpose1, transpose2, transpose3});
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
auto transpose =
m2.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), x);
auto slice1 = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {12}}}),
transpose);
auto slice2 = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {12}}, {"ends", {24}}}),
transpose);
auto transpose2 = m2.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), slice2);
auto slice3 = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {1}}, {"starts", {24}}, {"ends", {36}}}),
transpose);
m2.add_return({slice1, transpose2, slice3});
}
EXPECT(m1 == m2);
}
TEST_CASE(transpose_slice_single_transpose)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
auto slice1 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
auto sqrt1 = m1.add_instruction(migraphx::make_op("sqrt"), slice1);
auto slice2 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
auto transpose = m1.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice2);
auto slice3 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
auto sqrt3 = m1.add_instruction(migraphx::make_op("sqrt"), slice3);
m1.add_return({sqrt1, transpose, sqrt3});
}
migraphx::module m2 = m1;
run_pass(m1);
EXPECT(m1 == m2);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
...@@ -30,6 +30,7 @@
#include <migraphx/ranges.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/load_save.hpp>
#include <migraphx/tmp_dir.hpp>
#include <migraphx/verify_args.hpp>
#include <set>
...@@ -57,6 +58,15 @@ std::future<typename std::result_of<Function()>::type> detach_async(Function&& f
return std::async(std::launch::deferred, std::forward<Function>(f));
}
inline void verify_load_save(const migraphx::program& p)
{
migraphx::tmp_dir td{"migraphx_test"};
auto path = td.path / "test.mxr";
migraphx::save(p, path.string());
auto loaded = migraphx::load(path.string());
EXPECT(p == loaded);
}
inline void compile_check(migraphx::program& p, const migraphx::target& t, bool show_trace = false)
{
auto name = t.name();
...@@ -82,6 +92,8 @@ inline void compile_check(migraphx::program& p, const migraphx::target& t, bool
throw std::runtime_error("Compiling program with " + name + " alters its shape");
}
}
if(t.name() != "ref")
verify_load_save(p);
}

target_info run_verify::get_target_info(const std::string& name) const
...@@ -152,10 +164,12 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
auto_print::set_terminate_handler(name);
if(migraphx::enabled(MIGRAPHX_DUMP_TEST{}))
migraphx::save(p, name + ".mxr");
verify_load_save(p);
std::vector<std::string> target_names;
for(const auto& tname : migraphx::get_targets())
{
// TODO(varunsh): once verify tests can run, remove fpga
if(tname == "ref" || tname == "fpga")
continue;
// if tests disabled, skip running it
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
struct test_conv_add_relu : verify_program<test_conv_add_relu>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
auto weights =
mm->add_parameter("w", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
auto bias_literal = migraphx::literal{migraphx::shape{migraphx::shape::float_type, {4}},
{2.0f, 2.0f, 2.0f, 2.0f}};
auto bias = mm->add_literal(bias_literal);
auto conv = mm->add_instruction(migraphx::make_op("convolution"), input, weights);
auto bcast_bias = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", conv->get_shape().lens()}}),
bias);
auto bias_add = mm->add_instruction(migraphx::make_op("add"), conv, bcast_bias);
mm->add_instruction(migraphx::make_op("relu"), bias_add);
return p;
}
};
...@@ -197,7 +197,8 @@ class Parameter:
optional: bool = False,
returns: bool = False,
virtual: bool = False,
this: bool = False,
hidden: bool = False) -> None:
self.name = name
self.type = Type(type)
self.optional = optional
...@@ -211,6 +212,7 @@ class Parameter:
self.returns = returns
self.virtual = virtual
self.this = this
self.hidden = hidden
self.bad_param_check: Optional[BadParam] = None
self.virtual_read: Optional[List[str]] = None
self.virtual_write: Optional[str] = None
...@@ -744,6 +746,8 @@ void destroy(T* x)
{
delete x; // NOLINT
}

// TODO: Move to interface preamble
template <class C, class D>
struct manage_generic_ptr
...@@ -754,23 +758,24 @@ struct manage_generic_ptr
{
}

manage_generic_ptr(void* pdata, const char* obj_tname, C pcopier, D pdeleter)
: data(nullptr), obj_typename(obj_tname), copier(pcopier), deleter(pdeleter)
{
copier(&data, pdata);
}

manage_generic_ptr(const manage_generic_ptr& rhs)
: data(nullptr), obj_typename(rhs.obj_typename), copier(rhs.copier), deleter(rhs.deleter)
{
if(copier)
copier(&data, rhs.data);
}

manage_generic_ptr(manage_generic_ptr&& other) noexcept
: data(other.data), obj_typename(other.obj_typename), copier(other.copier), deleter(other.deleter)
{
other.data = nullptr;
other.obj_typename = "";
other.copier = nullptr;
other.deleter = nullptr;
}
...@@ -778,6 +783,7 @@ struct manage_generic_ptr
manage_generic_ptr& operator=(manage_generic_ptr rhs)
{
std::swap(data, rhs.data);
std::swap(obj_typename, rhs.obj_typename);
std::swap(copier, rhs.copier);
std::swap(deleter, rhs.deleter);
return *this;
...@@ -790,6 +796,7 @@ struct manage_generic_ptr
}

void* data = nullptr;
const char* obj_typename = "";
C copier = nullptr;
D deleter = nullptr;
};
...@@ -1042,8 +1049,8 @@ interface_handle_definition = Template('''
extern "C" struct ${ctype};
struct ${ctype} {
template<class... Ts>
${ctype}(void* p, ${copier} c, ${deleter} d, const char* obj_typename, Ts&&... xs)
: object_ptr(p, obj_typename, c, d), xobject(std::forward<Ts>(xs)...)
{}
manage_generic_ptr<${copier}, ${deleter}> object_ptr = nullptr;
${cpptype} xobject;
...@@ -1057,9 +1064,13 @@ ${return_type} ${name}(${params}) const
${output_decls}
if (${fname} == nullptr)
throw std::runtime_error("${name} function is missing.");
std::array<char, 256> exception_msg;
exception_msg.front() = '\\0';
auto api_error_result = ${fname}(${args});
if (api_error_result != ${success}) {
const std::string exception_str(exception_msg.data());
throw std::runtime_error("Error in ${name} of: " + std::string(object_ptr.obj_typename) + ": " + exception_str);
}
return ${output};
}
''')
...@@ -1079,7 +1090,9 @@ def generate_virtual_impl(f: Function, fname: str) -> str:
largs += f.returns.virtual_output_args()
output = f.returns.virtual_output()
largs += [arg for p in f.params for arg in p.virtual_arg()]
lparams += [
p.virtual_param() for p in f.params if not (p.this or p.hidden)
]
args = ', '.join(largs)
params = ', '.join(lparams)
return c_api_virtual_impl.substitute(locals())
...@@ -1126,8 +1139,15 @@ class Interface(Handle):
# Add this parameter to the function
this = Parameter('obj', 'void*', this=True)
this.virtual_read = ['object_ptr.data']
exception_msg = Parameter('exception_msg', 'char*', hidden=True)
exception_msg.virtual_read = ['${name}.data()']
exception_msg_size = Parameter('exception_msg_size',
'size_t',
hidden=True)
exception_msg_size.virtual_read = ['exception_msg.size()']
f = Function(name,
params=[this, exception_msg, exception_msg_size] + (params or []),
virtual=True,
**kwargs)
self.ifunctions.append(f)
......
...@@ -39,34 +39,47 @@
#include <migraphx/convert_to_json.hpp>
#include <algorithm>
#include <cstdarg>

namespace migraphx {

static thread_local bool disable_exception_catch = false; // NOLINT
extern "C" void migraphx_test_private_disable_exception_catch(bool b)
{
disable_exception_catch = b;
}
template <class F>
migraphx_status try_(F f, bool output = true) // NOLINT
{
if(disable_exception_catch)
{
f();
}
else
{
try
{
f();
}
catch(const migraphx::exception& ex)
{
if(output)
std::cerr << "MIGraphX Error: " << ex.what() << std::endl;
if(ex.error > 0)
return migraphx_status(ex.error);
else
return migraphx_status_unknown_error;
}
catch(const std::exception& ex)
{
if(output)
std::cerr << "MIGraphX Error: " << ex.what() << std::endl;
return migraphx_status_unknown_error;
}
catch(...)
{
return migraphx_status_unknown_error;
}
}
return migraphx_status_success;
}
......
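
To illustrate how this helper is used elsewhere in the same file, here is a sketch of a generated C API entry point wrapping its body in try_ so that C++ exceptions become migraphx_status codes. The function name migraphx_example_api_call is hypothetical; real entry points are emitted by the api.py generator and rely on the headers already included at the top of api.cpp (including <stdexcept>):

extern "C" migraphx_status migraphx_example_api_call(const char* name)
{
    return migraphx::try_([&] {
        // Any exception thrown here is reported as a migraphx_status instead of
        // escaping across the C boundary, unless exception catching is disabled
        // for tests via migraphx_test_private_disable_exception_catch(true).
        if(name == nullptr)
            throw std::runtime_error("name is null");
    });
}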