Unverified commit 03b03f48, authored by Artur Wojcik, committed by GitHub

Merge branch 'develop' into windows_cxx_compilation

parents ba2a9a23 42a7e55d
@@ -281,6 +281,7 @@ jobs:
           -DBUILD_DEV=On \
           -DCMAKE_CXX_COMPILER_LAUNCHER=/usr/local/bin/ccache \
           -DCMAKE_C_COMPILER_LAUNCHER=/usr/local/bin/ccache \
+          -DCMAKE_CXX_FLAGS="-Werror" \
           -DGPU_TARGETS=gfx908 \
           ..
         make -j$(nproc) tests driver
...
@@ -71,7 +71,7 @@ include(ROCMSetupVersion)
 option(BUILD_DEV "Build for development purpose only" OFF)
-rocm_setup_version(VERSION 2.7.0)
+rocm_setup_version(VERSION 2.8.0)
 set(MIGRAPHX_SO_VERSION ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH})
 option( BUILD_SHARED_LIBS "Build as a shared library" ON )
...
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
struct parse_castlike : op_parser<parse_castlike>
{
std::vector<op_desc> operators() const { return {{"CastLike"}}; }
instruction_ref parse(const op_desc& /*opd*/,
const onnx_parser& /*parser*/,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
if(not(args.size() == 2))
{
MIGRAPHX_THROW("PARSE_CASTLIKE: CastLike must have exactly 2 inputs!");
}
shape::type_t target_type = args[1]->get_shape().type();
return info.add_instruction(make_op("convert", {{"target_type", target_type}}), args[0]);
}
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
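The new CastLike parser above lowers the operator onto MIGraphX's existing convert op, taking the target element type from the second input's shape. As a quick illustration of those semantics (not part of this commit; castlike_reference is just a hypothetical helper name), the values of the first input are kept and only the element type of the second input is adopted:

# Illustration only: CastLike(x, like) behaves like Cast(x, to=like.dtype),
# which is why the parser can emit a plain "convert" whose target_type
# comes from args[1]'s shape.
import numpy as np

def castlike_reference(x, like):
    # Keep x's values, adopt like's element type.
    return x.astype(like.dtype)

x = np.array([1.5, 2.25, 3.0], dtype=np.float16)
like = np.zeros(1, dtype=np.float32)
y = castlike_reference(x, like)
print(y.dtype)  # float32, values unchanged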
@@ -320,7 +320,8 @@ struct parse_resize : op_parser<parse_resize>
         // get the number of dimensions
         std::size_t n_dim = out_lens.size();
-        auto vvv_ind = std::vector(n_dim, std::vector(2, std::vector<size_t>(out_elements)));
+        std::vector<std::vector<std::size_t>> vv_ind(2, std::vector<std::size_t>(out_elements));
+        std::vector<std::vector<std::vector<std::size_t>>> vvv_ind(n_dim, vv_ind);
         std::vector<std::vector<float>> delta(n_dim, std::vector<float>(out_elements));
         shape_for_each(out_s, [&](const auto& out_idx_v, size_t out_idx) {
...
[Two new binary ONNX test models are added: castlike_error_test (a CastLike graph with a single input "0" and output "out", used for the error case) and castlike_test (a CastLike graph with inputs "0" and "1" and output "out"). Their serialized protobuf content is not human-readable and is omitted here.]
...
@@ -582,6 +582,29 @@ def cast_test():
     return ([node], [x], [y])


+@onnx_test()
+def castlike_test():
+    input = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
+    target_type = helper.make_tensor_value_info('1', TensorProto.FLOAT, [10])
+    output = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
+    node = onnx.helper.make_node('CastLike',
+                                 inputs=['0', '1'],
+                                 outputs=['out'])
+    return ([node], [input, target_type], [output])
+
+
+@onnx_test()
+def castlike_error_test():
+    input = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
+    output = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
+    node = onnx.helper.make_node('CastLike', inputs=['0'], outputs=['out'])
+    return ([node], [input], [output])
+
+
 @onnx_test()
 def ceil_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
...
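In this repository the @onnx_test() decorator turns the returned (nodes, inputs, outputs) triple into a serialized .onnx model consumed by the C++ tests below. The decorator itself is not part of this diff, so the following is only a rough standalone sketch of that serialization step; the file name, the opset pin, and the save_castlike_test helper are assumptions for illustration:

import onnx
from onnx import helper, TensorProto

def save_castlike_test(path='castlike_test.onnx'):
    # Build the same graph as castlike_test above and write it to disk.
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
    like = helper.make_tensor_value_info('1', TensorProto.FLOAT, [10])
    out = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
    node = helper.make_node('CastLike', inputs=['0', '1'], outputs=['out'])
    graph = helper.make_graph([node], 'castlike_test', [x, like], [out])
    # CastLike first appeared in opset 15, so pin the opset explicitly.
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 15)])
    onnx.checker.check_model(model)
    onnx.save(model, path)

save_castlike_test()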
@@ -687,6 +687,26 @@ TEST_CASE(cast_test)
     EXPECT(p == prog);
 }

+TEST_CASE(castlike_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l   = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, {10}});
+    mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {10}});
+    mm->add_instruction(
+        migraphx::make_op("convert",
+                          {{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
+        l);
+
+    auto prog = optimize_onnx("castlike_test.onnx");
+    EXPECT(p == prog);
+}
+
+TEST_CASE(castlike_error_test)
+{
+    EXPECT(test::throws([&] { migraphx::parse_onnx("castlike_error_test.onnx"); }));
+}
+
 TEST_CASE(ceil_test)
 {
     migraphx::program p;
...
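The castlike_test case compares the parsed program against a hand-built one instruction by instruction. A complementary numerical sanity check of the same model can be run from Python with onnx's reference evaluator; this assumes onnx >= 1.13 and that castlike_test.onnx has already been generated, and it is not part of the commit:

import numpy as np
from onnx.reference import ReferenceEvaluator

sess = ReferenceEvaluator('castlike_test.onnx')
x = np.arange(10, dtype=np.float16)
like = np.zeros(10, dtype=np.float32)
(out,) = sess.run(None, {'0': x, '1': like})
assert out.dtype == np.float32                  # dtype comes from the second input
assert np.allclose(out, x.astype(np.float32))   # values are preserved by the cast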
@@ -635,14 +635,8 @@ def disabled_tests_onnx_1_10_0(backend_test):
     backend_test.exclude(r'test_bernoulli_seed_cpu')
     backend_test.exclude(r'test_castlike_BFLOAT16_to_FLOAT_cpu')
     backend_test.exclude(r'test_castlike_BFLOAT16_to_FLOAT_expanded_cpu')
-    backend_test.exclude(r'test_castlike_DOUBLE_to_FLOAT16_cpu')
-    backend_test.exclude(r'test_castlike_DOUBLE_to_FLOAT_cpu')
-    backend_test.exclude(r'test_castlike_FLOAT16_to_DOUBLE_cpu')
-    backend_test.exclude(r'test_castlike_FLOAT16_to_FLOAT_cpu')
     backend_test.exclude(r'test_castlike_FLOAT_to_BFLOAT16_cpu')
     backend_test.exclude(r'test_castlike_FLOAT_to_BFLOAT16_expanded_cpu')
-    backend_test.exclude(r'test_castlike_FLOAT_to_DOUBLE_cpu')
-    backend_test.exclude(r'test_castlike_FLOAT_to_FLOAT16_cpu')
     backend_test.exclude(r'test_castlike_FLOAT_to_STRING_cpu')
     backend_test.exclude(r'test_castlike_FLOAT_to_STRING_expanded_cpu')
     backend_test.exclude(r'test_castlike_STRING_to_FLOAT_cpu')
@@ -860,28 +854,8 @@ def disabled_tests_onnx_1_13_0(backend_test):
     backend_test.exclude(r'test_scatter_elements_with_reduction_min_cpu')

     # The following tests fail due to the CastLike operator being unsupported
-    backend_test.exclude(r'test_elu_default_expanded_ver18_cpu')
-    backend_test.exclude(r'test_elu_example_expanded_ver18_cpu')
-    backend_test.exclude(r'test_elu_expanded_ver18_cpu')
-    backend_test.exclude(r'test_hardsigmoid_default_expanded_ver18_cpu')
-    backend_test.exclude(r'test_hardsigmoid_example_expanded_ver18_cpu')
-    backend_test.exclude(r'test_hardsigmoid_expanded_ver18_cpu')
-    backend_test.exclude(r'test_leakyrelu_default_expanded_cpu')
-    backend_test.exclude(r'test_leakyrelu_example_expanded_cpu')
-    backend_test.exclude(r'test_leakyrelu_expanded_cpu')
-    backend_test.exclude(r'test_selu_default_expanded_ver18_cpu')
-    backend_test.exclude(r'test_selu_example_expanded_ver18_cpu')
-    backend_test.exclude(r'test_selu_expanded_ver18_cpu')
-    backend_test.exclude(r'test_shrink_hard_expanded_ver18_cpu')
-    backend_test.exclude(r'test_shrink_soft_expanded_ver18_cpu')
     backend_test.exclude(r'test_split_1d_uneven_split_opset18_cpu')
     backend_test.exclude(r'test_split_2d_uneven_split_opset18_cpu')
-    backend_test.exclude(r'test_thresholdedrelu_default_expanded_ver18_cpu')
-    backend_test.exclude(r'test_thresholdedrelu_example_expanded_ver18_cpu')
-    backend_test.exclude(r'test_thresholdedrelu_expanded_ver18_cpu')
-    backend_test.exclude(r'test_relu_expanded_ver18_cpu')
-    backend_test.exclude(r'test_softsign_example_expanded_ver18_cpu')
-    backend_test.exclude(r'test_softsign_expanded_ver18_cpu')


 def disabled_tests_onnx_1_14_0(backend_test):
...