Commit 350bbea2 authored by Umang Yadav

Merge branch 'develop' into resnet50_partition

parents 848a476d 74ba9649
......@@ -65,6 +65,7 @@ jobs:
# GH actions can not update existing cache, as a workaround clear cache and then save it
- name: Clear tidy cache before saving
continue-on-error: true
if: ${{ steps.tidy_restore.outputs.cache-hit }}
shell: bash
env:
......@@ -72,7 +73,6 @@ jobs:
run: |
gh extension install actions/gh-actions-cache --pin v1.0.1
gh actions-cache delete ${{ steps.tidy_restore.outputs.cache-matched-key }} --confirm
continue-on-error: true
- name: Save cache files for tidy
uses: actions/cache/save@v3
......@@ -124,6 +124,7 @@ jobs:
# GH actions can not update existing cache, as a workaround clear cache and then save it
- name: Clear cppcheck cache before saving
continue-on-error: true
if: ${{ steps.cppcheck_restore.outputs.cache-hit }}
shell: bash
env:
......@@ -131,7 +132,6 @@ jobs:
run: |
gh extension install actions/gh-actions-cache --pin v1.0.1
gh actions-cache delete ${{ steps.cppcheck_restore.outputs.cache-matched-key }} --confirm
continue-on-error: true
- name: Save cache files for cppcheck
uses: actions/cache/save@v3
......@@ -212,6 +212,7 @@ jobs:
mkdir build
cd build
CXX=/opt/rocm/llvm/bin/clang++ CC=/opt/rocm/llvm/bin/clang cmake \
-DMIGRAPHX_DISABLE_LARGE_BUFFER_TESTS=On \
-DBUILD_DEV=On \
-DCMAKE_CXX_COMPILER_LAUNCHER=/usr/local/bin/ccache \
-DCMAKE_C_COMPILER_LAUNCHER=/usr/local/bin/ccache \
......@@ -219,12 +220,12 @@ jobs:
make -j$(nproc) tests driver
- name: Clear ccache cache before saving
continue-on-error: true
if: ${{ steps.ccache_restore.outputs.cache-hit }}
shell: bash
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set +x
gh extension install actions/gh-actions-cache --pin v1.0.1
gh actions-cache delete ${{ steps.ccache_restore.outputs.cache-matched-key }} --confirm
......@@ -365,6 +366,7 @@ jobs:
rbuild build -d cget -s gh -T check \
-DCMAKE_BUILD_TYPE=${{matrix.configuration}} \
-DMIGRAPHX_ENABLE_PYTHON=${{matrix.configuration == 'release' && 'On' || 'Off'}} \
-DMIGRAPHX_DISABLE_LARGE_BUFFER_TESTS=On \
-DBUILD_DEV=On \
-DCMAKE_CXX_FLAGS_DEBUG="-g1 -Os -fdebug-prefix-map=$PWD=. -fdebug-types-section -fno-omit-frame-pointer -fsanitize=undefined -fno-sanitize-recover=undefined" \
-DCMAKE_CXX_FLAGS_CODECOV="-g1 -Og -fdebug-prefix-map=$PWD=. -fdebug-types-section -fprofile-arcs -ftest-coverage -fno-omit-frame-pointer" \
......@@ -374,12 +376,12 @@ jobs:
# GH actions can not update existing cache, as a workaround clear cache and then save it
- name: Clear ccache cache before saving
continue-on-error: true
if: ${{ steps.ccache_restore.outputs.cache-hit }}
shell: bash
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set +x
gh extension install actions/gh-actions-cache --pin v1.0.1
gh actions-cache delete ${{ steps.ccache_restore.outputs.cache-matched-key }} --confirm
......@@ -481,6 +483,7 @@ jobs:
rbuild build -d cget -s gh -T check \
-DCMAKE_BUILD_TYPE=${{matrix.configuration}} \
-DMIGRAPHX_ENABLE_PYTHON=${{matrix.configuration == 'release' && 'On' || 'Off'}} \
-DMIGRAPHX_DISABLE_LARGE_BUFFER_TESTS=On \
-DBUILD_DEV=On \
-DCMAKE_CXX_FLAGS_DEBUG="-g1 -Os -fdebug-prefix-map=$PWD=. -fdebug-types-section -fno-omit-frame-pointer -fsanitize=undefined -fno-sanitize-recover=undefined" \
-DCMAKE_CXX_FLAGS_CODECOV="-g1 -Og -fdebug-prefix-map=$PWD=. -fdebug-types-section -fprofile-arcs -ftest-coverage -fno-omit-frame-pointer" \
......@@ -491,15 +494,14 @@ jobs:
# this is a workaround; GH actions can not update an existing cache
- name: Clear ccache cache before saving
continue-on-error: true
if: ${{ steps.ccache_restore_fpga.outputs.cache-hit }}
shell: bash
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set +x
gh extension install actions/gh-actions-cache
gh actions-cache delete ${{ steps.ccache_restore_fpga.outputs.cache-matched-key }} --confirm
continue-on-error: true
- name: Save cache files for ccache
uses: actions/cache/save@v3
......
......@@ -21,4 +21,4 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
ROCmSoftwarePlatform/rocMLIR@3657f509bfed86bb79d5c6e24aa237e48f09f9f3 -DBUILD_FAT_LIBROCKCOMPILER=On
ROCmSoftwarePlatform/rocMLIR@2c519c48eaa278d13e6c40bc0941119826d71512 -DBUILD_FAT_LIBROCKCOMPILER=On
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
......@@ -28,6 +28,7 @@ include(ROCMInstallTargets)
include(ROCMPackageConfigHelpers)
include(RegisterOp)
include(CheckCXXLinkerFlag)
add_library(migraphx
adjust_allocation.cpp
......@@ -185,6 +186,8 @@ register_migraphx_ops(
quant_convolution
quant_dot
quantizelinear
random_uniform
random_seed
recip
reduce_max
reduce_mean
......
......@@ -45,6 +45,9 @@ if(NOT WIN32)
endif()
rocm_clang_tidy_check(driver)
file(STRINGS "${CMAKE_SOURCE_DIR}/test/onnx/.onnxrt-commit" String_output)
target_compile_definitions(driver PUBLIC MIGRAPHX_ORT_SHA1="${String_output}")
target_link_libraries(driver migraphx_all_targets migraphx_onnx migraphx_tf migraphx_py)
rocm_install_targets(
......
......@@ -475,13 +475,15 @@ struct compiler
{
if(is_offload_copy_set(p) and not co.offload_copy)
{
std::cout << "MIGraphX program was likely compiled with offload_copy set, Try "
"passing "
"`--enable-offload-copy` if program run fails.\n";
std::cout
<< "[WARNING]: MIGraphX program was likely compiled with offload_copy "
"set, Try "
"passing "
"`--enable-offload-copy` if program run fails.\n";
}
else if(co.offload_copy)
{
std::cout << "MIGraphX program was likely compiled without "
std::cout << "[WARNING]: MIGraphX program was likely compiled without "
"offload_copy set, Try "
"removing "
"`--enable-offload-copy` flag if passed to driver, if program run "
......@@ -802,6 +804,13 @@ int main(int argc, const char* argv[])
auto&& m = get_commands();
auto cmd = args.front();
if(cmd == "ort-sha")
{
std::cout << MIGRAPHX_ORT_SHA1 << std::endl;
return 0;
}
if(m.count(cmd) > 0)
{
m.at(cmd)(argv[0], {args.begin() + 1, args.end()});
......
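
The new `ort-sha` subcommand simply prints the `MIGRAPHX_ORT_SHA1` macro that the driver CMakeLists.txt above bakes in from `test/onnx/.onnxrt-commit`. A minimal, self-contained sketch of that pattern follows; the `demo` program, the `unknown` fallback, and the compile command are illustrative assumptions, not the actual MIGraphX driver:

// Hedged sketch of the "bake a commit SHA into the binary" pattern used above.
// Build with e.g.:  g++ -DMIGRAPHX_ORT_SHA1='"abc123"' demo.cpp -o demo
#include <iostream>
#include <string>

#ifndef MIGRAPHX_ORT_SHA1
#define MIGRAPHX_ORT_SHA1 "unknown" // assumed fallback when the definition is not passed
#endif

int main(int argc, const char* argv[])
{
    // Mirrors the new driver branch: `demo ort-sha` prints the pinned SHA and exits.
    if(argc > 1 && std::string(argv[1]) == "ort-sha")
    {
        std::cout << MIGRAPHX_ORT_SHA1 << std::endl;
        return 0;
    }
    std::cout << "usage: demo ort-sha\n";
    return 0;
}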
......@@ -70,13 +70,19 @@ struct check_shapes
check_dynamic();
}
template <class Op>
template <class Op, MIGRAPHX_REQUIRES(not std::is_convertible<Op, std::string>{})>
check_shapes(const std::vector<shape>& s, const Op& op, const bool d = false)
: begin(s.begin()), end(s.end()), name(op.name()), dynamic_allowed(d)
{
check_dynamic();
}
check_shapes(const std::vector<shape>& s, const std::string& n, const bool d = false)
: begin(s.begin()), end(s.end()), name(n), dynamic_allowed(d)
{
check_dynamic();
}
void check_dynamic() const
{
if(not dynamic_allowed and this->any_of([&](const shape& s) { return s.dynamic(); }))
......@@ -228,6 +234,16 @@ struct check_shapes
return *this;
}
/*!
* Check all shapes have the same layout.
*/
const check_shapes& same_layout() const
{
if(not this->same([](const shape& s) { return find_permutation(s); }))
MIGRAPHX_THROW(prefix() + "Layouts do not match");
return *this;
}
/*!
* Check all shapes are standard.
*/
......
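
The new `same_layout()` check compares `find_permutation(s)` across all inputs, i.e. the ordering of axes by stride. The following standalone sketch illustrates that idea in plain C++; `layout_order` is an illustrative stand-in for `find_permutation`, not the MIGraphX implementation:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

// Stand-in for find_permutation: order the axes by decreasing stride. Two shapes
// share a layout when this ordering matches, regardless of the stride values.
std::vector<std::size_t> layout_order(const std::vector<std::size_t>& strides)
{
    std::vector<std::size_t> perm(strides.size());
    std::iota(perm.begin(), perm.end(), 0);
    std::stable_sort(perm.begin(), perm.end(), [&](std::size_t a, std::size_t b) {
        return strides[a] > strides[b];
    });
    return perm;
}

int main()
{
    // Logical dims {2, 2, 3, 4}: packed NCHW strides vs channels-last (NHWC) strides.
    std::vector<std::size_t> nchw{24, 12, 4, 1};
    std::vector<std::size_t> nhwc{24, 1, 8, 2};
    // Different axis orderings, so a same_layout()-style check would fail here.
    std::cout << std::boolalpha << (layout_order(nchw) == layout_order(nhwc)) << "\n"; // false
    return 0;
}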
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -62,7 +62,7 @@ void convolution(Output output, T input, T weights, Padding padding, Stride stri
shape win_shape{output_shape.type(), win_size};
double acc = 0.0;
shape_for_each(win_shape, [&](auto idx_win) {
shape_for_each(win_shape, [&](const auto& idx_win) {
auto k = idx_win[0];
const auto in_ch = group_id * wei_c + k;
std::vector<std::ptrdiff_t> idx(idx_o.begin(), idx_o.end());
......
......@@ -36,20 +36,48 @@ namespace op {
struct allocate
{
shape s{};
// for dynamic allocate to set the buffer type
shape::type_t buf_type = shape::half_type;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.s, "shape"));
return pack(f(self.s, "shape"), f(self.buf_type, "buf_type"));
}
std::string name() const { return "allocate"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
migraphx::check_shapes{inputs, *this, true}.has(0);
return s;
migraphx::check_shapes{inputs, *this, true}.has(0, 1);
// check if shape attribute is not default
if(s != shape())
{
return s;
}
else
{
const auto& out_dims = inputs.at(0);
assert(not out_dims.dynamic());
assert(out_dims.ndim() == 1);
std::size_t max_val = std::numeric_limits<std::size_t>::max();
std::vector<shape::dynamic_dimension> dyn_dims(out_dims.lens().at(0),
shape::dynamic_dimension{0, max_val});
return {buf_type, dyn_dims};
}
}
argument compute(const shape& output_shape, const std::vector<argument>&) const
argument compute(const shape& output_shape, const std::vector<argument>& args) const
{
return {output_shape};
if(args.empty())
{
return {output_shape};
}
else
{
std::vector<std::size_t> output_dims(output_shape.ndim());
args.at(0).visit([&](auto a) { output_dims.assign(a.begin(), a.end()); });
return {shape{buf_type, output_dims}};
}
}
};
......
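
With the new `buf_type` attribute, `allocate` has two modes: if the `shape` attribute is set it is returned directly; otherwise the single input carries the output dimensions and the computed shape is fully dynamic, each dimension ranging from 0 to the maximum size_t. A small sketch of the dynamic case, assuming only the public `migraphx::shape` API already used above (the rank of 3 is an illustrative choice standing in for the dims input's length):

// Hedged sketch: build the same "rank known, extents unknown" shape that the
// dynamic branch of allocate::compute_shape() returns.
#include <migraphx/shape.hpp>
#include <cstddef>
#include <iostream>
#include <limits>
#include <vector>

int main()
{
    using migraphx::shape;
    constexpr std::size_t ndim    = 3; // assumed rank, taken from the dims input in the real op
    constexpr std::size_t max_val = std::numeric_limits<std::size_t>::max();
    std::vector<shape::dynamic_dimension> dyn_dims(ndim, shape::dynamic_dimension{0, max_val});
    shape out{shape::half_type, dyn_dims}; // buf_type defaults to half_type
    std::cout << out << std::endl;
    return 0;
}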
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -164,7 +164,7 @@ struct convolution_backwards
shape win_shape{dyn_out.computed_shape.type(), win_size};
par_dfor(in_n, wei_c)([&](int o, int k) {
shape_for_each(win_shape, [&](auto idx_win) {
shape_for_each(win_shape, [&](const auto& idx_win) {
const int w = idx_win[0];
auto input_dims_start = idx_win.begin() + 1;
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -125,13 +125,12 @@ struct gather
auto out_lens = data.get_shape().lens();
out_lens[axis] = indices.get_shape().elements();
migraphx::shape out_comp_shape{data.get_shape().type(), out_lens};
shape_for_each(out_comp_shape, [&](const auto& out_idx) {
auto data_idx = out_idx;
auto in_index = indices[data_idx[axis]];
in_index = (in_index < 0) ? in_index + axis_dim_size : in_index;
data_idx[axis] = in_index;
output[out_comp_shape.index(out_idx.begin(), out_idx.end())] =
data(data_idx.begin(), data_idx.end());
shape_for_each(out_comp_shape, [&](const auto& out_idx_v, size_t out_idx) {
auto data_idx = out_idx_v;
auto in_index = indices[data_idx[axis]];
in_index = (in_index < 0) ? in_index + axis_dim_size : in_index;
data_idx[axis] = in_index;
output[out_idx] = data(data_idx.begin(), data_idx.end());
});
}
});
......
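
This hunk, like the nonzero, reverse, and roialign hunks below, switches to the two-parameter form of shape_for_each, which passes the flat index alongside the multi-dimensional index so callers no longer recompute `s.index(idx)`. The standalone illustration below shows that iteration pattern in plain C++; `for_each_index` is an illustrative stand-in, not the MIGraphX helper:

#include <cstddef>
#include <iostream>
#include <vector>

// Visit every point of an index space in row-major order, handing the callback
// both the multi-dimensional index and the running flat index.
template <class F>
void for_each_index(const std::vector<std::size_t>& lens, F f)
{
    std::vector<std::size_t> idx(lens.size(), 0);
    std::size_t total = 1;
    for(auto l : lens)
        total *= l;
    for(std::size_t flat = 0; flat < total; ++flat)
    {
        f(idx, flat);
        // advance the multi-dimensional index in row-major order
        for(std::size_t d = lens.size(); d-- > 0;)
        {
            if(++idx[d] < lens[d])
                break;
            idx[d] = 0;
        }
    }
}

int main()
{
    for_each_index({2, 3}, [](const std::vector<std::size_t>& idx, std::size_t flat) {
        std::cout << flat << ": (" << idx[0] << ", " << idx[1] << ")\n";
    });
    return 0;
}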
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -258,7 +258,7 @@ struct nonmaxsuppression
selected_boxes_inside_class.reserve(max_output_shape.elements());
// iterate over batches and classes
shape comp_s{shape::double_type, {num_batches, num_classes}};
shape_for_each(comp_s, [&](auto idx) {
shape_for_each(comp_s, [&](const auto& idx) {
auto batch_idx = idx[0];
auto class_idx = idx[1];
// index offset for this class
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -56,10 +56,10 @@ struct nonzero
std::vector<std::vector<std::size_t>> vec_idx;
auto s = args.front().get_shape();
args.front().visit([&](auto v) {
shape_for_each(s, [&](auto idx) {
if(not float_equal(v[s.index(idx)], 0))
shape_for_each(s, [&](const auto& idx_v, size_t idx) {
if(not float_equal(v[idx], 0))
{
vec_idx.push_back(idx);
vec_idx.push_back(idx_v);
}
});
});
......
......@@ -365,7 +365,7 @@ struct pooling
double output_val = op.template init<Type>();
// for each element in the window...
shape_for_each(win_shape, [&](auto idx_w) {
shape_for_each(win_shape, [&](const auto& idx_w) {
// the coordinates of this element
auto idx = idx_o;
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_RANDOM_SEED_HPP
#define MIGRAPHX_GUARD_OPERATORS_RANDOM_SEED_HPP
#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <random>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
/**
* Generates a random seed for the use of random number generators. Generating the seed
* at runtime guarantees there will be a different random sequence on every execution.
* This operation has no inputs or attributes, and outputs an unsigned integer tensor with
* a single value.
*/
struct random_seed
{
shape::type_t dtype = shape::type_t::uint64_type;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.dtype, "dtype"));
}
std::string name() const { return "random_seed"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(0);
return shape{dtype};
}
argument compute(const shape& output_shape, const std::vector<argument>&) const
{
argument result(output_shape);
result.visit([&](auto output) { output.front() = std::random_device{}(); });
return result;
}
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* Random Uniform distribution operator. Given a shape, populate it with random
* values. Calls to random_uniform using the same randomization seed as a
* literal input will
* always generate the same pseudo-random sequence.
*
* Inputs: (1) randomization seed (any type is allowed)
* (2) output buffer argument to be populated.
*
* Attributes: none
*
* Output: Returns the buffer from input #2.
*
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_RANDOM_UNIFORM_HPP
#define MIGRAPHX_GUARD_OPERATORS_RANDOM_UNIFORM_HPP
#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <random>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
/**
* random_uniform populates the passed shape with random numbers, in a uniform
* distribution. Range for floating-point data types is (0, 1);
* for integer types it is [0, <max value for the type>]
*/
struct random_uniform
{
// The random_uniform operation needs the random number generator seed
// to be passed as a runtime input.
std::string name() const { return "random_uniform"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this, true}.has(2);
return inputs.at(1);
}
argument compute(const shape&, std::vector<argument> args) const
{
// Output goes into the passed buffer, not the shape output.
auto result = args[1];
uint64_t local_seed = args[0].at<uint64_t>(0);
std::mt19937 gen(local_seed);
result.visit([&](auto output) {
using type = typename decltype(output)::value_type;
if constexpr(std::is_integral<type>{})
{
// default range for all integer types is
// (0, std::uniform_int_distribution<type>::max()).
// Todo: enable different ranges
std::uniform_int_distribution<type> dis;
std::generate(output.begin(), output.end(), [&] { return dis(gen); });
}
else
{
// default real distribution type is double with range (0, 1);
std::uniform_real_distribution<> dis;
std::generate(output.begin(), output.end(), [&] { return dis(gen); });
}
});
return result;
}
std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 1; }
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
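
Together, random_seed and random_uniform implement a conventional seed-then-generate pipeline: a fresh std::random_device value drawn at runtime, fed into std::mt19937, driving a default uniform distribution. The sketch below reproduces that pipeline with only the standard library (it is not the operators themselves; the buffer size of 8 is an illustrative choice):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <random>
#include <vector>

int main()
{
    // random_seed: draw a fresh seed at runtime so each execution produces a
    // different sequence (the operator uses std::random_device the same way).
    std::uint64_t seed = std::random_device{}();

    // random_uniform: seed a Mersenne Twister and fill a buffer with values in
    // (0, 1), matching the floating-point branch of compute() above.
    std::mt19937 gen(seed);
    std::uniform_real_distribution<> dis;
    std::vector<double> buffer(8);
    std::generate(buffer.begin(), buffer.end(), [&] { return dis(gen); });

    for(auto v : buffer)
        std::cout << v << " ";
    std::cout << "\n";
    return 0;
}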
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -163,7 +163,7 @@ struct reduce_op : op_name<Derived>
auto& self = static_cast<const Derived&>(*this);
auto data_idx = out_idx;
accumulator val = self.init();
shape_for_each(batch_shape, [&](auto b_idx) {
shape_for_each(batch_shape, [&](const auto& b_idx) {
this->tune_dims(tuned_axes, b_idx, data_idx);
accumulator x = input(data_idx.begin(), data_idx.end());
val = self.op()(accumulator{self.input()(x)}, val);
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -70,13 +70,13 @@ struct reverse
argument result{s};
auto lens = s.lens();
visit_all(result, args.front())([&](auto output, auto input) {
shape_for_each(s, [&](const auto& out_idx) {
auto in_idx = out_idx;
shape_for_each(s, [&](const auto& out_idx_v, size_t out_idx) {
auto in_idx = out_idx_v;
for(const auto& axis : axes)
{
in_idx[axis] = lens[axis] - 1 - out_idx[axis];
in_idx[axis] = lens[axis] - 1 - out_idx_v[axis];
}
output[s.index(out_idx)] = input[s.index(in_idx)];
output[out_idx] = input[s.index(in_idx)];
});
});
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -113,10 +113,9 @@ struct roialign
{
std::vector<pos_weight> results(bin_grid_size[0] * bin_grid_size[1] * output_height *
output_width);
shape_for_each(comp_s, [&](auto idx) {
std::array<std::size_t, 2> p = {idx[0], idx[1]};
std::array<std::size_t, 2> i = {idx[2], idx[3]};
auto index = comp_s.index(idx);
shape_for_each(comp_s, [&](const auto& idx_v, size_t index) {
std::array<std::size_t, 2> p = {idx_v[0], idx_v[1]};
std::array<std::size_t, 2> i = {idx_v[2], idx_v[3]};
std::array<float, 2> xy{};
std::array<int64_t, 2> low{};
......@@ -255,7 +254,7 @@ struct roialign
std::vector<std::size_t> comp_lens1 = {channels, out_dims[0], out_dims[1]};
shape comp_s1{migraphx::shape::float_type, comp_lens1};
std::vector<int64_t> vec_index(channels, 0);
shape_for_each(comp_s1, [&](auto idx) {
shape_for_each(comp_s1, [&](const auto& idx) {
auto c = idx[0];
auto ph = idx[1];
auto pw = idx[2];
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......