"src/include/Array.hpp" did not exist on "33b5a8556b68c2cbeba555088b647310779d17e4"
Commit bba8276d authored by Artur Wojcik

Merge branch 'develop' into uif2-initial

parents b20487d3 f5411e07
......@@ -281,6 +281,7 @@ jobs:
-DBUILD_DEV=On \
-DCMAKE_CXX_COMPILER_LAUNCHER=/usr/local/bin/ccache \
-DCMAKE_C_COMPILER_LAUNCHER=/usr/local/bin/ccache \
-DCMAKE_CXX_FLAGS="-Werror" \
-DGPU_TARGETS=gfx908 \
..
make -j$(nproc) tests driver
......
......@@ -80,7 +80,7 @@ include(ROCMSetupVersion)
option(BUILD_DEV "Build for development purpose only" OFF)
rocm_setup_version(VERSION 2.7.0)
rocm_setup_version(VERSION 2.8.0)
set(MIGRAPHX_SO_VERSION ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH})
option( BUILD_SHARED_LIBS "Build as a shared library" ON )
......@@ -297,7 +297,6 @@ file(MAKE_DIRECTORY ${DEST_DIR}/lib/onnx_migraphx)
foreach(py_file ${backend_files})
configure_file(${py_file} ${DEST_DIR}/lib/onnx_migraphx/. COPYONLY)
endforeach(py_file)
configure_file(${CMAKE_SOURCE_DIR}/test/py/onnx_backend_test.py ${DEST_DIR}/onnx_backend_test.py COPYONLY)
rocm_create_package(
NAME MIGraphX
......
......@@ -68,13 +68,6 @@ struct convert : unary<convert>
auto y = x;
shape::visit(type, [&](auto as) {
// Clamping the value between target_type's max and min doesn't work for NaNs.
// WIN32: MSVC's standard library implements std::isfinite()
// (used internally by std::isnan()) for floating-point types only
// - there are no overloads for integer types, which should be
// treated as doubles according to the C++ specification.
// Reference: https://en.cppreference.com/w/cpp/numeric/math/isfinite
if(std::isnan(static_cast<double>(x)))
{
y = as.nan();
......
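For reference, a minimal standalone sketch of the NaN-aware clamp-and-convert described in the comment above. This is not MIGraphX code; clamp_convert is a hypothetical helper name. The cast to double before std::isnan() is what keeps the check usable with integer inputs on MSVC.

// Sketch only (assumed names, not MIGraphX code): NaN-aware conversion with clamping.
#include <algorithm>
#include <cmath>
#include <limits>

template <class To, class From>
To clamp_convert(From x)
{
    // Cast to double so std::isnan() works even when From is an integer type
    // (MSVC provides no integer overloads of std::isnan()/std::isfinite()).
    if(std::isnan(static_cast<double>(x)))
    {
        // Only meaningful for floating-point targets; integer types have no NaN.
        return std::numeric_limits<To>::quiet_NaN();
    }
    const double lo = static_cast<double>(std::numeric_limits<To>::lowest());
    const double hi = static_cast<double>(std::numeric_limits<To>::max());
    // Clamp in double precision, then narrow to the target type.
    return static_cast<To>(std::clamp(static_cast<double>(x), lo, hi));
}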
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
struct parse_castlike : op_parser<parse_castlike>
{
std::vector<op_desc> operators() const { return {{"CastLike"}}; }
instruction_ref parse(const op_desc& /*opd*/,
const onnx_parser& /*parser*/,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
if(not(args.size() == 2))
{
MIGRAPHX_THROW("PARSE_CASTLIKE: CastLike must have exactly 2 inputs!");
}
shape::type_t target_type = args[1]->get_shape().type();
return info.add_instruction(make_op("convert", {{"target_type", target_type}}), args[0]);
}
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -49,6 +49,8 @@ struct parse_constant_of_shape : op_parser<parse_constant_of_shape>
{
MIGRAPHX_THROW("ConstantOfShape: attribute value can contain only 1 elements!");
}
// convert to a scalar literal
l_val = literal(shape{l_val.get_shape().type(), {1}, {0}}, l_val.data());
}
else
{
......@@ -64,30 +66,37 @@ struct parse_constant_of_shape : op_parser<parse_constant_of_shape>
migraphx::shape s;
// input is empty, output is a scalar
auto type = l_val.get_shape().type();
// empty input tensor, output is a scalar
if(args[0]->get_shape().elements() == 0)
migraphx::argument input = args[0]->eval();
if(not input.empty())
{
s = migraphx::shape{type, {1}, {0}};
// empty input tensor, output is a scalar
if(args[0]->get_shape().elements() == 0)
{
s = migraphx::shape{type, {1}, {0}};
}
else
{
std::vector<std::size_t> dims;
input.visit([&](auto ia) { dims.assign(ia.begin(), ia.end()); });
s = migraphx::shape{type, dims};
}
literal l_out{};
l_val.visit([&](auto val) {
using val_type = std::remove_cv_t<typename decltype(val)::value_type>;
// l_val contains only one element
std::vector<val_type> out_vec(s.elements(), val.front());
l_out = literal(s, out_vec);
});
return info.add_literal(l_out);
}
// has variable input (dynamic shape buffer)
else
{
migraphx::argument in = args[0]->eval();
check_arg_empty(in, "ConstantOfShape: dynamic shape is not supported");
std::vector<std::size_t> dims;
in.visit([&](auto input) { dims.assign(input.begin(), input.end()); });
s = migraphx::shape{type, dims};
auto dv_lit = info.add_literal(l_val);
auto alloc_ins =
info.add_instruction(make_op("allocate", {{"buf_type", type}}), args[0]);
return info.add_instruction(make_op("fill"), dv_lit, alloc_ins);
}
literal l_out{};
l_val.visit([&](auto val) {
using val_type = std::remove_cv_t<typename decltype(val)::value_type>;
// l_val contains only one element
std::vector<val_type> out_vec(s.elements(), val.front());
l_out = literal(s, out_vec);
});
return info.add_literal(l_out);
}
}
};
......
......@@ -320,7 +320,8 @@ struct parse_resize : op_parser<parse_resize>
// get the number of dimensions
std::size_t n_dim = out_lens.size();
auto vvv_ind = std::vector(n_dim, std::vector(2, std::vector<size_t>(out_elements)));
std::vector<std::vector<std::size_t>> vv_ind(2, std::vector<std::size_t>(out_elements));
std::vector<std::vector<std::vector<std::size_t>>> vvv_ind(n_dim, vv_ind);
std::vector<std::vector<float>> delta(n_dim, std::vector<float>(out_elements));
shape_for_each(out_s, [&](const auto& out_idx_v, size_t out_idx) {
......
......@@ -81,6 +81,14 @@ inline auto launch(hipStream_t stream, index_int global, index_int local)
using f_type = decltype(f);
dim3 nblocks(global / local);
dim3 nthreads(local);
/*
hipGetLastError() returns any error left over from a HIP call that failed earlier.
MIGraphX calls into various backend libraries, and failed HIP calls can happen there as well.
Calling hipGetLastError() here resets the error code to hipSuccess, so that a failure of
the hipLaunchKernelGGL() call below, made inside MIGraphX, can be captured.
*/
hipError_t flush_call = hipGetLastError();
(void)(flush_call);
// cppcheck-suppress UseDeviceLaunch
hipLaunchKernelGGL((launcher<f_type>), nblocks, nthreads, 0, stream, f);
hipError_t kernel_launch_status = hipGetLastError();
......
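For reference, the same flush-then-check pattern from this hunk as a standalone sketch. my_kernel and launch_checked are illustrative names and not part of MIGraphX; only the HIP runtime calls shown here are assumed to exist.

// Sketch only (assumed names): discard stale HIP errors before checking a launch.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void my_kernel() {}

void launch_checked(hipStream_t stream)
{
    // Discard any error left over from earlier HIP calls (possibly made by other
    // libraries) so the check below reflects only this kernel launch.
    (void)hipGetLastError();

    hipLaunchKernelGGL(my_kernel, dim3(1), dim3(64), 0, stream);

    hipError_t status = hipGetLastError();
    if(status != hipSuccess)
        std::printf("kernel launch failed: %s\n", hipGetErrorString(status));
}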
[Binary ONNX protobuf files, contents not shown: added castlike_error_test.onnx, castlike_test.onnx, const_of_shape_default_test.onnx and const_of_shape_dyn_int64_test.onnx; updated const_of_shape_int64_test.onnx and const_of_shape_no_value_attr_test.onnx, whose internal graph name changes from constant_of_shape to match the test name.]
......@@ -582,6 +582,29 @@ def cast_test():
return ([node], [x], [y])
@onnx_test()
def castlike_test():
input = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
target_type = helper.make_tensor_value_info('1', TensorProto.FLOAT, [10])
output = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
node = onnx.helper.make_node('CastLike',
inputs=['0', '1'],
outputs=['out'])
return ([node], [input, target_type], [output])
@onnx_test()
def castlike_error_test():
input = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
output = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
node = onnx.helper.make_node('CastLike', inputs=['0'], outputs=['out'])
return ([node], [input], [output])
@onnx_test()
def ceil_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
......@@ -1007,9 +1030,9 @@ def const_of_shape_empty_input_test():
[10])
empty_val = np.array([]).astype(np.int64)
empty_ts = helper.make_tensor(name='empty_tensor',
data_type=TensorProto.INT32,
data_type=TensorProto.INT64,
dims=empty_val.shape,
vals=empty_val.flatten().astype(int))
vals=empty_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
......@@ -1035,9 +1058,9 @@ def const_of_shape_float_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
data_type=TensorProto.INT64,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
......@@ -1055,22 +1078,44 @@ def const_of_shape_float_test():
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_default_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT64,
dims=shape_val.shape,
vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
outputs=['y'])
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_int64_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
data_type=TensorProto.INT64,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
......@@ -1084,9 +1129,9 @@ def const_of_shape_int64_test():
def const_of_shape_no_value_attr_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
data_type=TensorProto.INT64,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
......@@ -1104,6 +1149,40 @@ def const_of_shape_no_value_attr_test():
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_dyn_float_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.FLOAT, [1],
[10])
output_dims = helper.make_tensor_value_info('output_dims',
TensorProto.INT64, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['output_dims'],
outputs=['y'],
value=tensor_val)
return ([node], [output_dims], [y])
@onnx_test()
def const_of_shape_dyn_int64_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
output_dims = helper.make_tensor_value_info('output_dims',
TensorProto.INT64, [3])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['output_dims'],
outputs=['y'],
value=tensor_val)
return ([node], [output_dims], [y])
@onnx_test()
def conv_1d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
......
......@@ -687,6 +687,26 @@ TEST_CASE(cast_test)
EXPECT(p == prog);
}
TEST_CASE(castlike_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, {10}});
mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {10}});
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l);
auto prog = optimize_onnx("castlike_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(castlike_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("castlike_error_test.onnx"); }));
}
TEST_CASE(ceil_test)
{
migraphx::program p;
......@@ -1040,11 +1060,25 @@ TEST_CASE(constant_one_val_int64_test)
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_default_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape output_dims_shape(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(output_dims_shape, {2, 3, 4}));
migraphx::shape output_shape{migraphx::shape::float_type, {2, 3, 4}};
std::vector<float> vec(output_shape.elements(), 0.0);
mm->add_literal(migraphx::literal(output_shape, vec));
auto prog = optimize_onnx("const_of_shape_default_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_empty_input_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal(migraphx::shape::int32_type));
mm->add_literal(migraphx::literal(migraphx::shape::int64_type));
migraphx::shape s(migraphx::shape::int64_type, {1}, {0});
std::vector<int64_t> vec(s.elements(), 10);
mm->add_literal(migraphx::literal(s, vec));
......@@ -1057,7 +1091,7 @@ TEST_CASE(const_of_shape_float_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ss(migraphx::shape::int32_type, {3});
migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::float_type, {2, 3, 4});
std::vector<float> vec(s.elements(), 10.0f);
......@@ -1071,8 +1105,10 @@ TEST_CASE(const_of_shape_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ss(migraphx::shape::int32_type, {3});
// output_dims
migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
// constant shape literal
migraphx::shape s(migraphx::shape::int64_type, {2, 3, 4});
std::vector<int64_t> vec(s.elements(), 10);
mm->add_literal(migraphx::literal(s, vec));
......@@ -1085,7 +1121,7 @@ TEST_CASE(const_of_shape_no_value_attr_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ss(migraphx::shape::int32_type, {3});
migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::float_type, {2, 3, 4});
std::vector<float> vec(s.elements(), 0.0f);
......@@ -1095,6 +1131,42 @@ TEST_CASE(const_of_shape_no_value_attr_test)
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_dyn_float_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto od_param =
mm->add_parameter("output_dims", migraphx::shape{migraphx::shape::int64_type, {3}});
auto alloc_ins = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::float_type}}), od_param);
migraphx::shape dv_shape(migraphx::shape::float_type, {1}, {0});
auto dv_lit = mm->add_literal(migraphx::literal(dv_shape, {10}));
auto fill_ins = mm->add_instruction(migraphx::make_op("fill"), dv_lit, alloc_ins);
mm->add_return({fill_ins});
migraphx::onnx_options options;
auto prog = parse_onnx("const_of_shape_dyn_float_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_dyn_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto od_param =
mm->add_parameter("output_dims", migraphx::shape{migraphx::shape::int64_type, {3}});
auto alloc_ins = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::int64_type}}), od_param);
migraphx::shape dv_shape(migraphx::shape::int64_type, {1}, {0});
auto dv_lit = mm->add_literal(migraphx::literal(dv_shape, {10}));
auto fill_ins = mm->add_instruction(migraphx::make_op("fill"), dv_lit, alloc_ins);
mm->add_return({fill_ins});
migraphx::onnx_options options;
auto prog = parse_onnx("const_of_shape_dyn_int64_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_autopad_fail_test)
{
EXPECT(test::throws([&] { optimize_onnx("conv_autopad_fail_test.onnx"); }));
......