Commit 7edcd405 authored by umangyadav

Merge remote-tracking branch 'upstream/develop' into resnet50_partition

parents 5d0564f4 85e8f164
......@@ -4,62 +4,7 @@
# are installed, and if so, uses the installed version to format
# the staged changes.
base=clang-format-10
format=""
yapf_base=yapf
yapf_format=""
use_yapf=true
# Redirect output to stderr.
exec 1>&2
# check if clang-format is installed
type "$base" >/dev/null 2>&1 && format="$base"
# no versions of clang-format are installed
if [ -z "$format" ]
then
echo "$base is not installed. Pre-commit hook will not be executed."
exit 0
fi
# check if yapf is installed
type "$yapf_base" >/dev/null 2>&1 && yapf_format="$yapf_base"
# no versions of yapf are installed
if [ -z "$yapf_format" ]
then
echo "$yapf_base is not installed. Pre-commit hook for python files will not be executed"
use_yapf=false
fi
# Do everything from top - level
cd $(git rev-parse --show-toplevel)
if git rev-parse --verify HEAD >/dev/null 2>&1
then
against=HEAD
else
# Initial commit: diff against an empty tree object
against=16bbb57
fi
# do the formatting
for file in $(git diff-index --cached --name-only $against | grep -E '\.h$|\.hpp$|\.cpp$|\.cl$|\.c$|\.h\.in$|\.hpp\.in$|\.cpp\.in$|\.py$')
do
if [ -e "$file" ]
then
if [ $(echo $file | grep -E '\.py$') ]
then
if $use_yapf
then
echo "$yapf_format $file"
"$yapf_format" -i "$file"
fi
else
echo "$format $file"
"$format" -i -style=file "$file"
fi
fi
done
python3 tools/format.py -q -i HEAD
......@@ -144,6 +144,8 @@ jobs:
runs-on: ROCM-Ubuntu
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
# In this step, this action saves a list of existing images,
# the cache is created without them in the post run.
......@@ -164,19 +166,8 @@ jobs:
shell: bash -c "docker run -i -v=$GITHUB_WORKSPACE:/data -w /data migraphx bash < {0}"
run: |
set -e
find . -iname '*.h' \
-o -iname '*.hpp' \
-o -iname '*.cpp' \
-o -iname '*.h.in' \
-o -iname '*.hpp.in' \
-o -iname '*.cpp.in' \
-o -iname '*.cl' \
-o -iname '*.c' \
| grep -v 'build/' \
| xargs -n 1 -P 1 -I{} -t sh -c 'clang-format-10 -style=file {} | diff - {}'
find . -iname '*.py' \
| grep -v 'build/' \
| xargs -n 1 -P 1 -I{} -t sh -c 'yapf {} | diff - {}'
git config --global --add safe.directory /data
python3 tools/format.py origin/${{ github.event_name == 'pull_request' && github.base_ref || 'develop' }}
pyflakes:
runs-on: ubuntu-20.04
......
......@@ -42,7 +42,9 @@ on:
required: true
default: '-r'
concurrency: "perftest-${{ github.head_ref || github.base_ref || 'schedule' }}"
concurrency:
group: "perftest-${{ github.head_ref || github.base_ref || 'schedule' }}"
cancel-in-progress: true
jobs:
release:
......
......@@ -19,7 +19,6 @@ RUN sh -c "echo 'Package: *\nPin: release o=repo.radeon.com\nPin-priority: 600'
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
apt-utils \
build-essential \
clang-format-10 \
cmake \
curl \
doxygen \
......@@ -97,7 +96,8 @@ RUN /download_models.sh && rm /download_models.sh
# Install latest ccache version
RUN cget -p $PREFIX install facebook/zstd@v1.4.5 -X subdir -DCMAKE_DIR=build/cmake
RUN cget -p $PREFIX install ccache@v4.1 -DENABLE_TESTING=OFF
RUN cget -p /opt/cmake install kitware/cmake@v3.24.3
RUN cget -p /opt/cmake install kitware/cmake@v3.26.4
COPY ./test/onnx/.onnxrt-commit /
......
......@@ -88,9 +88,9 @@ def rocmnodename(name) {
} else if(name == "navi21") {
node_name = "${rocmtest_name} && navi21";
} else if(name == "mi100+") {
node_name = "${rocmtest_name} && (gfx908 || gfx90a)";
node_name = "${rocmtest_name} && (gfx908 || gfx90a) && !vm";
} else if(name == "cdna") {
node_name = "${rocmtest_name} && (gfx908 || gfx90a || vega)";
node_name = "${rocmtest_name} && (gfx908 || gfx90a || vega) && !vm";
} else if(name == "nogpu") {
node_name = "${rocmtest_name} && nogpu";
}
......
......@@ -12,7 +12,6 @@ RUN sh -c 'echo deb [arch=amd64 trusted=yes] http://repo.radeon.com/rocm/apt/5.6
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
apt-utils \
build-essential \
clang-format-10 \
cmake \
curl \
doxygen \
......
......@@ -134,6 +134,7 @@ register_migraphx_ops(
cosh
cos
dequantizelinear
dimensions_of
div
dot
elu
......
......@@ -150,8 +150,8 @@ insert_common_args(module& m, instruction_ref ins, std::vector<instruction_ref>
auto c_type = compute_common_types(input_shapes);
auto c_dyn_dims = compute_common_dyn_dims(input_shapes);
// following should work for a static or dynamic shape
if(inputs[0]->get_shape().dyn_dims() != c_dyn_dims)
auto s0 = inputs[0]->get_shape();
if(not s0.dynamic() or s0.dyn_dims() != c_dyn_dims)
{
inputs[0] = m.insert_instruction(
ins, make_op("multibroadcast", {{"out_dyn_dims", to_value(c_dyn_dims)}}), inputs);
......@@ -159,7 +159,8 @@ insert_common_args(module& m, instruction_ref ins, std::vector<instruction_ref>
std::transform(inputs.begin() + 1, inputs.end(), inputs.begin() + 1, [&](auto input) {
// uses previous input to avoid recalculating the common shape from the
// full set of input shapes at runtime
if(input->get_shape().dyn_dims() != c_dyn_dims)
auto s = input->get_shape();
if(not s.dynamic() or s.dyn_dims() != c_dyn_dims)
{
return m.insert_instruction(
ins,
......
......@@ -66,7 +66,19 @@ struct convert : unary<convert>
auto type = target_type;
return [type](auto x) {
auto y = x;
shape::visit(type, [&](auto as) { y = as(x); });
shape::visit(type, [&](auto as) {
// clamping a value between target_type's min and max does not work for NaNs
if(std::isnan(x))
{
y = as.nan();
}
else
{
// clamp overflowing/underflowing values to min()/max() instead of +/-infinity
// during downcasting
y = std::min(std::max(as(x), as.min()), as.max());
}
});
return y;
};
}
......
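A standalone sketch of the saturating-convert behavior added here. The `saturate_cast` helper below is hypothetical (not a MIGraphX API) and clamps before casting rather than going through the visited type's `as` wrapper, but it shows the same technique: NaN maps to the target's NaN (or 0 for integer targets), and out-of-range values saturate at the type's limits instead of wrapping or becoming +/-infinity.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>

// Illustrative helper: NaN maps to the target's NaN (or 0 for
// integer targets); out-of-range values saturate at the limits.
template <class To, class From>
To saturate_cast(From x)
{
    if(std::isnan(x))
        return std::numeric_limits<To>::has_quiet_NaN
                   ? std::numeric_limits<To>::quiet_NaN()
                   : To{0};
    const auto lo = static_cast<From>(std::numeric_limits<To>::lowest());
    const auto hi = static_cast<From>(std::numeric_limits<To>::max());
    return static_cast<To>(std::min(std::max(x, lo), hi));
}

int main()
{
    std::cout << int(saturate_cast<std::int8_t>(300.0f)) << "\n";  // 127
    std::cout << int(saturate_cast<std::int8_t>(-300.0f)) << "\n"; // -128
    std::cout << saturate_cast<float>(std::nan("")) << "\n";       // nan
}
```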
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_DIMENSIONS_OF_HPP
#define MIGRAPHX_GUARD_OPERATORS_DIMENSIONS_OF_HPP
#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/dyn_output.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
/**
* Returns the dimensions of the input argument from starting axis to ending axis.
 * At least `end` must be set to use this operator (set `end` to ndim for the default ONNX
 * behavior of the `Shape` operator). This should only be used for dynamic shapes, as it can
 * be simplified to a literal for static shapes.
*/
struct dimensions_of
{
std::size_t start = 0;
std::size_t end = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.start, "start"), f(self.end, "end"));
}
std::string name() const { return "dimensions_of"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this, true}.has(1);
if(start >= end)
{
MIGRAPHX_THROW("DIMENSIONS_OF: start >= end. start = " + std::to_string(start) +
", end = " + std::to_string(end));
}
return shape{shape::int64_type, {end - start}};
}
argument compute(const shape& output_shape, std::vector<argument> args) const
{
argument result{output_shape};
auto input_lens = args[0].get_shape().lens();
result.visit([&](auto output) {
std::copy(input_lens.cbegin() + start, input_lens.cbegin() + end, output.begin());
});
return result;
}
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
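For intuition, the compute step above is just a slice of the runtime dimensions: with input dims {2, 3, 4, 5}, start=1, end=3 yields [3, 4]. A minimal sketch with plain standard-library types (illustrative names only, not the MIGraphX implementation):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative only: copy lens[start, end) into an int64 output,
// mirroring dimensions_of::compute above.
std::vector<std::int64_t>
dims_slice(const std::vector<std::size_t>& lens, std::size_t start, std::size_t end)
{
    return {lens.begin() + start, lens.begin() + end};
}

int main()
{
    for(auto d : dims_slice({2, 3, 4, 5}, 1, 3))
        std::cout << d << " "; // prints: 3 4
}
```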
......@@ -69,7 +69,7 @@ struct multibroadcast
auto make_bcast_strides = [&](std::vector<std::size_t> bcast_lens, std::size_t offset) {
std::vector<size_t> bcast_strides(bcast_lens.size(), 0);
for(std::ptrdiff_t i = s0.lens().size() - 1; i >= 0; i--)
for(std::ptrdiff_t i = s0.ndim() - 1; i >= 0; i--)
{
if(bcast_lens[i + offset] == s0.lens()[i])
{
......@@ -84,13 +84,13 @@ struct multibroadcast
if(s0.dynamic())
MIGRAPHX_THROW(
"MULTIBROADCAST: Single dynamic input shape not supported. Use two inputs.");
if(s0.lens().size() > output_lens.size())
if(s0.ndim() > output_lens.size())
{
MIGRAPHX_THROW("MULTIBROADCAST: input dimensions should <= output size");
}
auto offset = output_lens.size() - s0.lens().size();
for(std::ptrdiff_t i = s0.lens().size() - 1; i >= 0; i--)
auto offset = output_lens.size() - s0.ndim();
for(std::ptrdiff_t i = s0.ndim() - 1; i >= 0; i--)
{
if(output_lens[i + offset] != s0.lens()[i] and s0.lens()[i] != 1)
{
......@@ -119,7 +119,7 @@ struct multibroadcast
{
// output_lens will not be set for 2+ input version
auto bcast_lens = compute_common_lens(inputs);
auto offset = bcast_lens.size() - s0.lens().size();
auto offset = bcast_lens.size() - s0.ndim();
auto bcast_strides = make_bcast_strides(bcast_lens, offset);
return {t, std::move(bcast_lens), std::move(bcast_strides)};
}
......
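The change above swaps `s0.lens().size()` for `s0.ndim()` so the rank query also works when `s0` is dynamic; the stride rule itself is unchanged. That rule is worth a self-contained sketch (illustrative names, not the MIGraphX API): dimensions are right-aligned against the output, matching dimensions keep their stride, and size-1 or missing leading dimensions get stride 0 so the element repeats.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Right-aligned broadcast: dims that match keep their stride,
// dims of size 1 (or missing leading dims) get stride 0.
std::vector<std::size_t> make_bcast_strides(const std::vector<std::size_t>& in_lens,
                                            const std::vector<std::size_t>& in_strides,
                                            const std::vector<std::size_t>& out_lens)
{
    std::vector<std::size_t> out(out_lens.size(), 0);
    std::size_t offset = out_lens.size() - in_lens.size();
    for(std::ptrdiff_t i = in_lens.size() - 1; i >= 0; i--)
    {
        if(out_lens[i + offset] == in_lens[i])
            out[i + offset] = in_strides[i];
        // else in_lens[i] == 1: leave stride 0 so the element repeats
    }
    return out;
}

int main()
{
    // {3, 1} broadcast to {2, 3, 4}: strides {1, 1} -> {0, 1, 0}
    for(auto s : make_bcast_strides({3, 1}, {1, 1}, {2, 3, 4}))
        std::cout << s << " ";
}
```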
......@@ -251,9 +251,10 @@ auto compute_op(rank<1>,
const shape& output,
const std::vector<argument>& inputs,
const std::vector<module_ref>& module_args,
F f)
-> decltype(
x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f))
F f) -> decltype(x.compute(make_compute_output_shape(pack(x, output, inputs)),
inputs,
module_args,
f))
{
return x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f);
}
......@@ -309,9 +310,10 @@ auto compute_op(rank<3>,
const shape& output,
const std::vector<argument>& inputs,
const std::vector<module_ref>& module_args,
F f)
-> decltype(
x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f))
F f) -> decltype(x.compute(make_compute_output_shape(pack(x, output, inputs)),
inputs,
module_args,
f))
{
return x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f);
}
......
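The two hunks above are formatting only, but the pattern they touch, ranked overloads whose trailing `decltype` return type SFINAEs away when `x.compute(...)` is ill-formed, is easy to miss. A minimal sketch of the dispatch idea with hypothetical types (not the MIGraphX ones):

```cpp
#include <iostream>

// rank<N> inherits from rank<N-1>, so an argument of rank<1>
// prefers the rank<1> overload and falls back to rank<0>.
template <int N>
struct rank : rank<N - 1> {};
template <>
struct rank<0> {};

// Selected only if x.compute(v) is well-formed.
template <class T>
auto compute_op(rank<1>, const T& x, int v) -> decltype(x.compute(v))
{
    return x.compute(v);
}

// Fallback when no compute() member exists.
template <class T>
int compute_op(rank<0>, const T&, int v) { return v; }

struct has_compute { int compute(int v) const { return v * 2; } };
struct no_compute {};

int main()
{
    std::cout << compute_op(rank<1>{}, has_compute{}, 21) << "\n"; // 42
    std::cout << compute_op(rank<1>{}, no_compute{}, 21) << "\n";  // 21
}
```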
......@@ -48,6 +48,7 @@
#include <migraphx/op/convolution_backwards.hpp>
#include <migraphx/op/cosh.hpp>
#include <migraphx/op/cos.hpp>
#include <migraphx/op/dimensions_of.hpp>
#include <migraphx/op/div.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/elu.hpp>
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -240,6 +240,10 @@ struct MIGRAPHX_EXPORT shape
template <class Iterator>
std::size_t index(Iterator start, Iterator last) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: index() called on dynamic shape");
}
assert(std::distance(start, last) <= this->lens().size());
assert(this->lens().size() == this->strides().size());
return std::inner_product(start, last, this->strides().begin(), std::size_t{0}); // NOLINT
......
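The guard added above matters because `index()` reads `lens()` and `strides()`, which (per the shape.cpp changes later in this diff) now throw on dynamic shapes. The computation itself is a plain inner product of a multi-index with the strides; a self-contained sketch:

```cpp
#include <cassert>
#include <iostream>
#include <numeric>
#include <vector>

// Flat offset of a multi-index in a strided layout:
// index = sum_i idx[i] * strides[i]
std::size_t flat_index(const std::vector<std::size_t>& idx,
                       const std::vector<std::size_t>& strides)
{
    assert(idx.size() <= strides.size());
    return std::inner_product(idx.begin(), idx.end(), strides.begin(), std::size_t{0});
}

int main()
{
    // Row-major {2, 3, 4} has strides {12, 4, 1}; (1, 2, 3) -> 23
    std::cout << flat_index({1, 2, 3}, {12, 4, 1}) << "\n";
}
```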
......@@ -57,13 +57,12 @@ struct parse_batchnorm : op_parser<parse_batchnorm>
auto x_rank = x_lens.size();
if(x_rank == 1 or x_rank == 2)
{
auto rt = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {0.5}});
auto eps = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {epsilon}});
auto numer = info.add_broadcastable_binary_op("sub", args[0], args[3]);
auto var_eps = info.add_broadcastable_binary_op("add", args[4], eps);
auto denom = info.add_broadcastable_binary_op("pow", var_eps, rt);
auto div0 = info.add_broadcastable_binary_op("div", numer, denom);
auto r0 = info.add_broadcastable_binary_op("mul", div0, args[1]);
auto eps = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {epsilon}});
auto x_sub_mean = info.add_broadcastable_binary_op("sub", args[0], args[3]);
auto var_eps = info.add_broadcastable_binary_op("add", args[4], eps);
auto rsqrt = info.add_instruction(make_op("rsqrt"), var_eps);
auto mul0 = info.add_broadcastable_binary_op("mul", args[1], rsqrt);
auto r0 = info.add_broadcastable_binary_op("mul", x_sub_mean, mul0);
return info.add_broadcastable_binary_op("add", r0, args[2]);
}
else if(x_rank > 2)
......@@ -71,7 +70,6 @@ struct parse_batchnorm : op_parser<parse_batchnorm>
// unsqueeze tensors of shape (C) to broadcast correctly
std::vector<int64_t> unsqueeze_axes(x_lens.size() - 2);
std::iota(unsqueeze_axes.begin(), unsqueeze_axes.end(), 1);
auto rt = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {0.5}});
auto eps = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {epsilon}});
auto scale_unsqueeze = info.add_instruction(
migraphx::make_op("unsqueeze", {{"axes", unsqueeze_axes}}), args[1]);
......@@ -81,11 +79,11 @@ struct parse_batchnorm : op_parser<parse_batchnorm>
migraphx::make_op("unsqueeze", {{"axes", unsqueeze_axes}}), args[3]);
auto var_unsqueeze = info.add_instruction(
migraphx::make_op("unsqueeze", {{"axes", unsqueeze_axes}}), args[4]);
auto numer = info.add_broadcastable_binary_op("sub", args[0], mean_unsqueeze);
auto var_eps = info.add_broadcastable_binary_op("add", var_unsqueeze, eps);
auto denom = info.add_broadcastable_binary_op("pow", var_eps, rt);
auto div0 = info.add_broadcastable_binary_op("div", numer, denom);
auto r0 = info.add_broadcastable_binary_op("mul", div0, scale_unsqueeze);
auto x_sub_mean = info.add_broadcastable_binary_op("sub", args[0], mean_unsqueeze);
auto var_eps = info.add_broadcastable_binary_op("add", var_unsqueeze, eps);
auto rsqrt = info.add_instruction(make_op("rsqrt"), var_eps);
auto mul0 = info.add_broadcastable_binary_op("mul", scale_unsqueeze, rsqrt);
auto r0 = info.add_broadcastable_binary_op("mul", x_sub_mean, mul0);
return info.add_broadcastable_binary_op("add", r0, bias_unsqueeze);
}
else
......
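The rewrite replaces `(x - mean) / pow(var + eps, 0.5)` with a multiply by `rsqrt(var + eps)`, folding the scale in first so only one broadcast multiply touches the full tensor. The two forms are algebraically equivalent; a scalar sketch (illustrative only, not the MIGraphX graph):

```cpp
#include <cmath>
#include <iostream>

// Old form: subtract mean, divide by sqrt(var + eps), then scale.
double bn_old(double x, double mean, double var, double scale, double bias, double eps)
{
    return (x - mean) / std::pow(var + eps, 0.5) * scale + bias;
}

// New form: multiply by (scale * rsqrt(var + eps)) instead of dividing.
double bn_new(double x, double mean, double var, double scale, double bias, double eps)
{
    double rsqrt = 1.0 / std::sqrt(var + eps);
    return (x - mean) * (scale * rsqrt) + bias;
}

int main()
{
    std::cout << bn_old(2.0, 1.0, 4.0, 3.0, 0.5, 1e-5) << "\n";
    std::cout << bn_new(2.0, 1.0, 4.0, 3.0, 0.5, 1e-5) << "\n"; // same value
}
```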
......@@ -79,13 +79,11 @@ struct parse_instancenorm : op_parser<parse_instancenorm>
auto x = args[0];
auto scale = args[1];
auto bias = args[2];
auto dims = x->get_shape().lens();
if(not contains(valid_types, dtype))
MIGRAPHX_THROW(opd.op_name + ": invalid output type: " + std::to_string(dtype) +
". Valid types are 1 (float), 10 (half), and 11 (double).");
bool dyn_input = x->get_shape().dynamic();
auto ndims = x->get_shape().ndim();
auto ndims = x->get_shape().ndim();
assert(ndims >= 2);
auto kdims = ndims - 2;
std::vector<int64_t> axes(kdims);
......@@ -102,6 +100,12 @@ struct parse_instancenorm : op_parser<parse_instancenorm>
(dtype == shape::half_type and not convert_fp16) ? "reduce_sum" : "reduce_mean";
if(dtype == shape::half_type and not convert_fp16)
{
if(x->get_shape().dynamic())
{
MIGRAPHX_THROW("PARSE_INSTANCENORM: half type not supported with dynamic shape "
"unless convert_fp16 is TRUE");
}
auto dims = x->get_shape().lens();
double n =
std::accumulate(dims.begin() + 2, dims.end(), 1, [&](const auto& i, const auto& j) {
return i * j;
......@@ -122,13 +126,14 @@ struct parse_instancenorm : op_parser<parse_instancenorm>
// both scale and bias.
instruction_ref scale_bcast;
instruction_ref bias_bcast;
if(dyn_input)
if(x->get_shape().dynamic())
{
scale_bcast = info.add_instruction(make_op("broadcast", {{"axis", 1}}), scale, x);
bias_bcast = info.add_instruction(make_op("broadcast", {{"axis", 1}}), bias, x);
}
else
{
auto dims = x->get_shape().lens();
scale_bcast = info.add_instruction(
make_op("broadcast", {{"axis", 1}, {"out_lens", dims}}), scale);
bias_bcast =
......
......@@ -30,8 +30,11 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
// Use a literal instruction to replace the shape since, output of
// shape operator are literals in migraphx
/**
 * For a static shape input, creates a literal in migraphx.
 * For a dynamic shape input, creates a dimensions_of operator in migraphx (runtime
 * evaluation of the shape).
*/
struct parse_shape : op_parser<parse_shape>
{
std::vector<op_desc> operators() const { return {{"Shape"}}; }
......@@ -43,13 +46,54 @@ struct parse_shape : op_parser<parse_shape>
{
if(args.size() != 1)
MIGRAPHX_THROW("Shape: operator should have 1 operand");
std::vector<std::size_t> arg_shape = args[0]->get_shape().lens();
std::vector<int64_t> vec_shape(arg_shape.size());
migraphx::shape s(migraphx::shape::int64_type, {arg_shape.size()});
std::transform(arg_shape.begin(), arg_shape.end(), vec_shape.begin(), [](auto i) {
return int64_t(i);
});
return info.add_literal(migraphx::literal{s, vec_shape});
auto input_shape = args[0]->get_shape();
int input_ndim = input_shape.ndim();
std::size_t start = 0;
std::size_t end = input_ndim;
// Normalizing the start and end is handled here because of how the static shape version
// works: clamp to [-r, r], where r is the ndim of the input, then make negative values positive.
auto normalize_ind = [&](int64_t ind) {
if(ind < (-1 * input_ndim))
{
ind = -1 * input_ndim;
}
if(ind > input_ndim)
{
ind = input_ndim;
}
return (ind >= 0) ? ind : input_ndim + ind;
};
if(contains(info.attributes, "end"))
{
end = normalize_ind(info.attributes.at("end").i());
}
if(contains(info.attributes, "start"))
{
start = normalize_ind(info.attributes.at("start").i());
}
if(end <= start)
{
MIGRAPHX_THROW("PARSE_SHAPE: ending axis <= starting axis, end: " +
std::to_string(end) + " start: " + std::to_string(start));
}
if(input_shape.dynamic())
{
return info.add_instruction(make_op("dimensions_of", {{"start", start}, {"end", end}}),
args[0]);
}
else
{
std::size_t output_ndim = end - start;
std::vector<int64_t> vec_shape(output_ndim);
migraphx::shape s(migraphx::shape::int64_type, {output_ndim});
std::vector<std::size_t> input_lens = input_shape.lens();
std::transform(input_lens.begin() + start,
input_lens.begin() + end,
vec_shape.begin(),
[](auto i) { return int64_t(i); });
return info.add_literal(migraphx::literal{s, vec_shape});
}
}
};
......
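The ONNX `Shape` operator's `start`/`end` attributes accept negative values; the parser clamps them to [-r, r] and wraps negatives, as the lambda above does. A standalone sketch of that normalization (plain C++, illustrative names):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Clamp ind to [-ndim, ndim], then wrap negative values,
// as in the normalize_ind lambda above.
std::int64_t normalize_ind(std::int64_t ind, std::int64_t ndim)
{
    ind = std::max(-ndim, std::min(ind, ndim));
    return ind >= 0 ? ind : ind + ndim;
}

int main()
{
    // For a rank-4 input: start=-1 -> 3, end=100 -> 4, start=-100 -> 0
    std::cout << normalize_ind(-1, 4) << " "
              << normalize_ind(100, 4) << " "
              << normalize_ind(-100, 4) << "\n"; // 3 4 0
}
```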
......@@ -29,6 +29,7 @@
#include <migraphx/simplify_reshapes.hpp>
#include <migraphx/simplify_qdq.hpp>
#include <migraphx/eliminate_common_subexpression.hpp>
#include <migraphx/optimize_module.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
......@@ -48,19 +49,12 @@ MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_INT8_QUANTIZATION_PARAMS)
// This function is to convert any instructions specified in the input
// from double or float to float16 by inserting a convert operator.
// For the conversion, there could be cases of overflowing, but it
// is very rare in the area of deeping learning, so we just do a
// truncate of the input to get the fp16.
// For the conversion, there could be cases of overflowing or underflowing, but they
// are uncommon. Run optimize_module() before converting to fp16 so constant evaluation and
// folding happen in FP32, avoiding loss of precision.
void quantize_fp16(program& prog, const std::vector<std::string>& ins_names)
{
run_passes(prog,
{quantize_fp16_pass{ins_names},
eliminate_common_subexpression{},
dead_code_elimination{},
simplify_reshapes{},
dead_code_elimination{},
simplify_qdq{},
dead_code_elimination{}});
run_passes(prog, {optimize_module{}, quantize_fp16_pass{ins_names}, optimize_module{}});
}
void quantize_int8(program& prog,
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -273,9 +273,23 @@ shape shape::from_permutation(type_t t,
shape::type_t shape::type() const { return impl->m_type; }
const std::vector<std::size_t>& shape::lens() const { return impl->m_lens; }
const std::vector<std::size_t>& shape::lens() const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: lens() called on a dynamic shape");
}
return impl->m_lens;
}
const std::vector<std::size_t>& shape::strides() const { return impl->m_strides; }
const std::vector<std::size_t>& shape::strides() const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: strides() called on a dynamic shape");
}
return impl->m_strides;
}
std::size_t shape::ndim() const
{
......@@ -535,7 +549,14 @@ bool shape::any_of_dynamic() const
});
}
const std::vector<shape::dynamic_dimension>& shape::dyn_dims() const { return impl->m_dyn_dims; }
const std::vector<shape::dynamic_dimension>& shape::dyn_dims() const
{
if(not this->dynamic())
{
MIGRAPHX_THROW("SHAPE: dyn_dims() called on a static shape");
}
return impl->m_dyn_dims;
}
std::vector<std::size_t> shape::min_lens() const
{
......@@ -679,12 +700,22 @@ const std::vector<shape>& shape::sub_shapes() const { return impl->m_shapes; }
void migraphx_to_value(value& v, const shape& s)
{
value result;
result["type"] = migraphx::to_value(s.type_string());
result["lens"] = migraphx::to_value(s.lens());
result["strides"] = migraphx::to_value(s.strides());
result["sub_shapes"] = migraphx::to_value(s.sub_shapes());
result["dynamic_dimensions"] = migraphx::to_value(s.dyn_dims());
v = result;
result["type"] = migraphx::to_value(s.type_string());
result["sub_shapes"] = migraphx::to_value(s.sub_shapes());
// avoid calling functions that will throw
if(s.dynamic())
{
result["lens"] = {};
result["strides"] = {};
result["dynamic_dimensions"] = migraphx::to_value(s.dyn_dims());
}
else
{
result["lens"] = migraphx::to_value(s.lens());
result["strides"] = migraphx::to_value(s.strides());
result["dynamic_dimensions"] = {};
}
v = result;
}
void migraphx_from_value(const value& v, shape& s)
......
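The serialization change branches on `dynamic()` so `migraphx_to_value` never calls an accessor that now throws. The guard-before-access pattern in miniature, with a hypothetical `toy_shape` (not the MIGraphX class):

```cpp
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Miniature of the guarded accessors above: static-only data is
// unreachable when the shape is dynamic.
struct toy_shape
{
    bool is_dynamic = false;
    std::vector<std::size_t> m_lens;

    const std::vector<std::size_t>& lens() const
    {
        if(is_dynamic)
            throw std::runtime_error("lens() called on a dynamic shape");
        return m_lens;
    }
};

int main()
{
    toy_shape s{false, {2, 3}};
    std::cout << s.lens().size() << "\n"; // 2
    toy_shape d{true, {}};
    try { d.lens(); }
    catch(const std::exception& e) { std::cout << e.what() << "\n"; }
}
```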
......@@ -216,6 +216,7 @@ struct find_mlir_op
"quant_dot",
"add",
"clip",
"relu",
"sub",
"mul",
"div",
......