Unverified commit 1530ec24 authored by Ted Themistokleous, committed by GitHub

Merge branch 'develop' into add_parity_check_ci

parents 5c98fcb0 c2e01b10
......@@ -26,6 +26,7 @@
#include <migraphx/config.hpp>
#include <migraphx/filesystem.hpp>
#include <migraphx/optional.hpp>
#include <functional>
#include <memory>
#include <vector>
......@@ -43,6 +44,9 @@ struct MIGRAPHX_EXPORT dynamic_loader
return path(reinterpret_cast<void*>(address));
}
static fs::path path(void* address);
static optional<dynamic_loader> try_load(const fs::path& p);
dynamic_loader() = default;
dynamic_loader(const fs::path& p);
......
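A hedged usage sketch of the try_load() declared above: the optional return lets callers probe for a library without throwing. It assumes migraphx::fs aliases std::filesystem as in this codebase; the function and library names are illustrative only.

#include <migraphx/dynamic_loader.hpp>
#include <iostream>

void load_optional_plugin(const migraphx::fs::path& p)
{
    // try_load returns an empty optional instead of throwing when the library is absent
    if(auto lib = migraphx::dynamic_loader::try_load(p))
        std::cout << "loaded " << p.string() << "\n";
    else
        std::cout << p.string() << " not found, continuing without it\n";
}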
......@@ -80,7 +80,7 @@ struct marker
{
using std::swap;
auto* derived = this->any_cast<PrivateDetailTypeErasedT>();
if(derived and private_detail_te_handle_mem_var.unique())
if(derived and private_detail_te_handle_mem_var.use_count() == 1)
{
*derived = std::forward<PrivateDetailTypeErasedT>(value);
}
......@@ -233,7 +233,7 @@ struct marker
private_detail_te_handle_base_type& private_detail_te_get_handle()
{
assert(private_detail_te_handle_mem_var != nullptr);
if(not private_detail_te_handle_mem_var.unique())
if(private_detail_te_handle_mem_var.use_count() > 1)
private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
return *private_detail_te_handle_mem_var;
}
......
......@@ -381,22 +381,24 @@ void find_matches_for(source_location location, Mod& mod, instruction_ref ins, M
const int trace = value_of(MIGRAPHX_TRACE_MATCHES{});
const bool validate = enabled(MIGRAPHX_VALIDATE_MATCHES{});
const auto trace_filter = string_value_of(MIGRAPHX_TRACE_MATCHES_FOR{});
const bool trace_for = not trace_filter.empty() and
(contains(std::string{location.file_name()}, trace_filter) or
contains(std::string{location.function_name()}, trace_filter));
bool match = false;
bool match = false;
each_args(
[&](auto&& m) {
const auto& matcher_name = get_type_name(m);
const bool trace_for = not trace_filter.empty() and
(contains(std::string{location.file_name()}, trace_filter) or
contains(std::string{location.function_name()}, trace_filter) or
contains(matcher_name, trace_filter));
if(match)
return;
if(trace > 1 or trace_for)
std::cout << "Match: " << get_type_name(m) << std::endl;
if(trace > 1 and trace_for)
std::cout << "Match: " << matcher_name << std::endl;
auto r = match_instruction(get_module(mod), ins, m.matcher());
if(r.result == get_module(mod).end())
return;
if(trace > 0 or trace_for)
{
std::cout << "Matched by " << get_type_name(m) << std::endl;
std::cout << "Matched by " << matcher_name << std::endl;
get_module(mod).debug_print(ins);
}
// If it's already invalid, don't validate it again
......@@ -407,7 +409,7 @@ void find_matches_for(source_location location, Mod& mod, instruction_ref ins, M
auto invalid = get_module(mod).validate();
if(invalid != get_module(mod).end())
{
std::cout << "Invalid program from match: " << get_type_name(m) << std::endl;
std::cout << "Invalid program from match: " << matcher_name << std::endl;
std::cout << "Invalid instructions: " << std::endl;
get_module(mod).debug_print(invalid->inputs());
get_module(mod).debug_print(invalid);
......@@ -621,6 +623,8 @@ MIGRAPHX_PRED_MATCHER(broadcast, instruction_ref ins)
template <class... Ms>
auto skip(Ms... ms)
{
static_assert(((not std::is_convertible<Ms, std::string>{}) and ...),
"Use a matcher not a string for skip.");
auto m = any_of(ms...);
return make_basic_fun_matcher([=](matcher_context& ctx, instruction_ref start) {
return fix<optional<instruction_ref>>(
......
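A short illustration of the new static_assert in skip() above: it rejects string arguments at compile time, since skip() expects matchers. This is a sketch assuming the usual match::name matcher from this library.

// Inside a matcher-building context (namespace migraphx::match assumed):
auto ok = skip(name("contiguous"));    // fine: name(...) is a matcher
// auto bad = skip("contiguous");      // rejected: "Use a matcher not a string for skip."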
......@@ -222,7 +222,17 @@ struct MIGRAPHX_EXPORT module
void annotate(std::ostream& os, std::function<void(instruction_ref)> a) const;
std::vector<module_ref> get_sub_modules(bool shallow = false) const;
/* Sorts the module in topological order, i.e. reverse-post-order (RPO) DFS order.
   It takes the last instruction (or @return) as the root, walks back through the graph,
   and moves the inputs of each instruction so that they appear before the instruction
   itself.
*/
module& sort();
/* Any instruction "X" can have module arguments and those modules inside them can use any other
* instruction "Y" from predecessor modules of the instruction "X". Such instruction "Y" inside
* module args are not listed as input instructions to "X". But those instructions "Y" must be
* evaluted before the instruction "X" can. Therefore such "Y" instructions are considered
* implicit dependency to "X".
*/
ins_dep_map calc_implicit_deps() const;
MIGRAPHX_EXPORT friend std::ostream& operator<<(std::ostream& os, const module& m);
......
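A minimal, generic sketch of the reverse-post-order idea documented for sort() above (illustrative only; the real implementation works on instruction_refs and the module's own data structures):

#include <unordered_map>
#include <unordered_set>
#include <vector>

// Visit a node's inputs first, then emit the node, so every producer
// appears before its consumers in the resulting order.
void rpo_visit(int node,
               const std::unordered_map<int, std::vector<int>>& inputs,
               std::unordered_set<int>& seen,
               std::vector<int>& order)
{
    if(not seen.insert(node).second)
        return;
    auto it = inputs.find(node);
    if(it != inputs.end())
        for(int in : it->second)
            rpo_visit(in, inputs, seen, order);
    order.push_back(node);
}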
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -28,6 +28,7 @@
#include <migraphx/shape.hpp>
#include <cstring>
#include <vector>
#include <migraphx/op/normalize_attribute.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
......@@ -42,6 +43,36 @@ struct select_dependent_type
template <class T, class... Ts>
using dependent_type = typename select_dependent_type<T, Ts...>::type;
/**
* Used to normalize variable input axes at model runtime.
* Example: the axes inputs of the slice operator.
*
* \param axes the axes to normalize
* \param input_shape shape of the input tensor
* \param attr_val the normalize_axes attributes from the operator
* \param prefix error message prefix
*/
std::vector<int64_t> normalize_axes(const std::vector<int64_t>& axes,
const shape& input_shape,
const value& attr_val,
const std::string& prefix = "");
/**
* Used to normalize variable input axes at model runtime.
* Example: the starts and ends inputs of the slice operator.
*
* \param indices the indices to normalize
* \param axes which axes the indices apply over
* \param input_shape shape of the input tensor
* \param attr_val the normalize_axes attributes from the operator
* \param prefix error message prefix
*/
std::vector<int64_t> normalize_indices(const std::vector<int64_t>& indices,
const std::vector<int64_t>& axes,
const shape& input_shape,
const value& attr_val,
const std::string& prefix = "");
MIGRAPHX_EXPORT
bool normalize_attributes(operation& op, const shape& input_shape);
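For illustration of what "normalizing" an axis means here, a standalone sketch (not the library routine): negative axes are mapped into [0, ndim) and out-of-range axes are rejected, using the error-message prefix as above.

#include <cstdint>
#include <stdexcept>
#include <string>

int64_t normalize_one_axis(int64_t axis, int64_t ndim, const std::string& prefix = "")
{
    if(axis < -ndim or axis >= ndim)
        throw std::runtime_error(prefix + "axis out of range");
    return axis < 0 ? axis + ndim : axis; // e.g. axis = -1 with ndim = 4 -> 3
}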
......
......@@ -36,20 +36,48 @@ namespace op {
struct allocate
{
shape s{};
// for dynamic allocate to set the buffer type
shape::type_t buf_type = shape::half_type;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.s, "shape"));
return pack(f(self.s, "shape"), f(self.buf_type, "buf_type"));
}
std::string name() const { return "allocate"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
migraphx::check_shapes{inputs, *this, true}.has(0);
return s;
migraphx::check_shapes{inputs, *this, true}.has(0, 1);
// check if shape attribute is not default
if(s != shape())
{
return s;
}
else
{
const auto& out_dims = inputs.at(0);
assert(not out_dims.dynamic());
assert(out_dims.ndim() == 1);
std::size_t max_val = std::numeric_limits<std::size_t>::max();
std::vector<shape::dynamic_dimension> dyn_dims(out_dims.lens().at(0),
shape::dynamic_dimension{0, max_val});
return {buf_type, dyn_dims};
}
}
argument compute(const shape& output_shape, const std::vector<argument>&) const
argument compute(const shape& output_shape, const std::vector<argument>& args) const
{
return {output_shape};
if(args.empty())
{
return {output_shape};
}
else
{
std::vector<std::size_t> output_dims(output_shape.ndim());
args.at(0).visit([&](auto a) { output_dims.assign(a.begin(), a.end()); });
return {shape{buf_type, output_dims}};
}
}
};
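A hedged usage sketch of the two forms of allocate, assuming the usual make_op/to_value helpers from this codebase: the static form fixes the buffer shape via the "shape" attribute, while the dynamic form passes no shape and instead supplies a rank-1 tensor of output dimensions at runtime, with buf_type selecting the element type as computed above.

// Static: shape known at compile time
auto static_alloc = migraphx::make_op(
    "allocate", {{"shape", migraphx::to_value(migraphx::shape{migraphx::shape::float_type, {2, 3}})}});
// Dynamic: no "shape" attribute; the operator is later given one input holding the output
// dimensions, and the buffer element type comes from the "buf_type" attribute.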
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -33,8 +33,12 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
// Specifies where to add the "extra" cell of padding if the
// calculated padding is an odd number.
// Padding mode is default_ for fixed shape padding.
// same_lower and same_upper used for dynamic padding.
// same_lower and same_upper specify dynamic padding.
// The odd cell goes at the beginning of the dimension
// (same_lower) or end (same_upper).
enum padding_mode_t
{
default_, // NOLINT
......
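A hypothetical helper (not part of this library) to make the same_lower / same_upper distinction above concrete: when the total auto-computed padding for a dimension is odd, the extra cell goes to the beginning for same_lower and to the end for same_upper.

#include <cstddef>
#include <utility>

// Returns {pad_before, pad_after} for one spatial dimension.
std::pair<std::size_t, std::size_t> split_auto_pad(std::size_t total, bool same_upper)
{
    std::size_t small = total / 2;     // total = 3 -> small = 1
    std::size_t large = total - small; //              large = 2
    return same_upper ? std::make_pair(small, large)  // extra cell at the end
                      : std::make_pair(large, small); // extra cell at the beginning
}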
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -82,7 +82,7 @@ struct convolution
const auto input_ndim = inputs[0].ndim();
const auto padding_size = padding.size();
if(input_ndim != padding_size / 2 + 2 && input_ndim != padding_size + 2)
if(input_ndim != padding_size / 2 + 2 and input_ndim != padding_size + 2)
{
MIGRAPHX_THROW("CONVOLUTION: input and attribute size mismatch!");
}
......@@ -206,6 +206,7 @@ struct convolution
std::vector<std::size_t> new_padding;
if(padding_mode != op::padding_mode_t::default_)
{
// Auto-calculate the padding sizes with calc_dyn_auto_pad
auto input_lens = args[0].get_shape().lens();
auto weights_lens = args[1].get_shape().lens();
new_padding =
......@@ -217,6 +218,7 @@ struct convolution
}
else
{
// Use the padding that was given
new_padding = padding;
if(output_shape.dynamic())
{
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -164,7 +164,7 @@ struct convolution_backwards
shape win_shape{dyn_out.computed_shape.type(), win_size};
par_dfor(in_n, wei_c)([&](int o, int k) {
shape_for_each(win_shape, [&](auto idx_win) {
shape_for_each(win_shape, [&](const auto& idx_win) {
const int w = idx_win[0];
auto input_dims_start = idx_win.begin() + 1;
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_FILL_HPP
#define MIGRAPHX_GUARD_OPERATORS_FILL_HPP
#include <migraphx/check_shapes.hpp>
#include <migraphx/dyn_output.hpp>
#include <migraphx/par_for.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
/**
* fill(default_value, output_buffer)
* Fill an output buffer with the given default_value.
* Note that if the default_value is a literal and the output_buffer
* has a static shape this operator can be replaced with a literal.
*/
struct fill
{
std::string name() const { return "fill"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this, true}.has(2).same_type();
if(inputs.at(0).dynamic() or inputs.at(0).elements() != 1)
{
MIGRAPHX_THROW("FILL: default_value is dynamic or more than one element");
}
return inputs.back();
}
argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
{
visit_all(args[0], args[1])([&](auto value, auto output) {
par_for(dyn_out.computed_shape.elements(), [&](auto i) { output[i] = value.front(); });
});
return args[1];
}
std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 1; }
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
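Conceptually, the operator above behaves like std::fill over the output buffer and then returns that buffer (output_alias = 1). A standalone sketch with a plain float vector, for illustration only:

#include <algorithm>
#include <vector>

std::vector<float>& fill_like(float default_value, std::vector<float>& output_buffer)
{
    std::fill(output_buffer.begin(), output_buffer.end(), default_value);
    return output_buffer; // aliases the second argument, as fill does
}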
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -125,13 +125,12 @@ struct gather
auto out_lens = data.get_shape().lens();
out_lens[axis] = indices.get_shape().elements();
migraphx::shape out_comp_shape{data.get_shape().type(), out_lens};
shape_for_each(out_comp_shape, [&](const auto& out_idx) {
auto data_idx = out_idx;
auto in_index = indices[data_idx[axis]];
in_index = (in_index < 0) ? in_index + axis_dim_size : in_index;
data_idx[axis] = in_index;
output[out_comp_shape.index(out_idx.begin(), out_idx.end())] =
data(data_idx.begin(), data_idx.end());
shape_for_each(out_comp_shape, [&](const auto& out_idx_v, size_t out_idx) {
auto data_idx = out_idx_v;
auto in_index = indices[data_idx[axis]];
in_index = (in_index < 0) ? in_index + axis_dim_size : in_index;
data_idx[axis] = in_index;
output[out_idx] = data(data_idx.begin(), data_idx.end());
});
}
});
......
......@@ -71,7 +71,7 @@ struct if_op
std::unordered_map<std::string, argument> params;
std::set<std::string> pnames;
for(const auto& smod : mods)
for(const_module_ref smod : mods)
{
auto names = smod->get_parameter_names();
pnames.insert(names.begin(), names.end());
......
......@@ -59,9 +59,9 @@ struct loop
MIGRAPHX_THROW("LOOP: operator should have one submodule.");
}
const auto& mod = mods.front();
auto mod_out_shapes = mod->get_output_shapes();
auto dep_param_num = inputs.size() - 2;
const_module_ref mod = mods.front();
auto mod_out_shapes = mod->get_output_shapes();
auto dep_param_num = inputs.size() - 2;
// The first item of the mod output shapes is the condition used in the loop,
// which is not needed to compute the output shape
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -258,7 +258,7 @@ struct nonmaxsuppression
selected_boxes_inside_class.reserve(max_output_shape.elements());
// iterate over batches and classes
shape comp_s{shape::double_type, {num_batches, num_classes}};
shape_for_each(comp_s, [&](auto idx) {
shape_for_each(comp_s, [&](const auto& idx) {
auto batch_idx = idx[0];
auto class_idx = idx[1];
// index offset for this class
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -56,10 +56,10 @@ struct nonzero
std::vector<std::vector<std::size_t>> vec_idx;
auto s = args.front().get_shape();
args.front().visit([&](auto v) {
shape_for_each(s, [&](auto idx) {
if(not float_equal(v[s.index(idx)], 0))
shape_for_each(s, [&](const auto& idx_v, size_t idx) {
if(not float_equal(v[idx], 0))
{
vec_idx.push_back(idx);
vec_idx.push_back(idx_v);
}
});
});
......
......@@ -29,6 +29,7 @@
#include <migraphx/config.hpp>
#include <migraphx/value.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/pad_calc.hpp>
#include <migraphx/par_for.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/dyn_output.hpp>
......@@ -40,10 +41,20 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
// The Pooling operator mostly follows the specifications for the Onnx pooling op.
// It assumes an NCHW layout, extended to support any number of spatial dimensions
// from 1 on up; dimensions are <batch index, channels, spatial dimensions...>
//
struct pooling
{
// Class members mode, ceil_mode, padding_mode have similar names but refer to separate
// concepts.
pooling_mode mode = {pooling_mode::average};
// If the input has a rank other than 4, then padding, stride, and lengths must all be
// specified, since the defaults are 2-dimensional. Exception: padding is not required
// if padding_mode != default_
// Padding along each spatial input dimension
// Can be ndim or 2*ndim values, where ndim is the size of lengths
// ndim values mean padding the same amount before and after each dimension
......@@ -63,13 +74,14 @@ struct pooling
// ceiling mode is a flag affecting output size
// or equivalently, placements of the pooling kernel.
// When true, round the size upwards, possibly
// including partial placements where the kernel extends beyond the edge
// of input and even padding. When false, round down so that all
// When true, round the size upwards. When false, round down so that all
// kernel placements fit but some input values may be dropped.
bool ceil_mode = false;
int lp_order = 2;
// Mode for auto padding. default_ indicates no auto padding.
padding_mode_t padding_mode = padding_mode_t::default_;
// Global pooling with dynamic shape input
bool dyn_global = false;
......@@ -84,6 +96,7 @@ struct pooling
{
return pack(f(self.mode, "mode"),
f(self.padding, "padding"),
f(self.padding_mode, "padding_mode"),
f(self.stride, "stride"),
f(self.lengths, "lengths"),
f(self.ceil_mode, "ceil_mode"),
......@@ -97,7 +110,8 @@ struct pooling
{
if(dyn_global)
return;
if((padding.size() != stride.size() and (padding.size()) != stride.size() * 2) or
if((padding_mode != default_ and padding.size() != stride.size() and
(padding.size()) != stride.size() * 2) or
stride.size() != lengths.size())
{
MIGRAPHX_THROW("POOLING: inconsistent attribute sizes");
......@@ -137,8 +151,19 @@ struct pooling
std::size_t padding_factor = 2 * padding[i];
if(padding.size() == 2 * kdims)
padding_factor = padding[i] + padding[i + kdims];
assert(input_lens[i + 2] + padding_factor >= lengths[i]);
std::size_t dim_size = input_lens[i + 2] + padding_factor - lengths[i];
std::size_t dim_size;
if(input_lens[i + 2] + padding_factor < lengths[i])
{
if(padding_mode == default_)
MIGRAPHX_THROW("POOLING: not enough padding for the given kernel size");
// lengths can be legitimately larger only if we're doing auto padding
// with a dynamic shape, in which case given padding is ignored. Set a dummy value.
dim_size = 2;
}
else
{
dim_size = input_lens[i + 2] + padding_factor - lengths[i];
}
std::size_t len =
(ceil_mode)
? dim_size / stride[i] +
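A short worked example of the padding_factor / dim_size arithmetic above, for one spatial dimension (the exact ceil/floor output-length expression is truncated in this diff, so only the visible part is illustrated):

// input extent = 7, kernel length = 3, padding given as before/after values {1, 2}:
//   padding_factor = padding[i] + padding[i + kdims] = 1 + 2 = 3
//   dim_size       = 7 + 3 - 3 = 7
// With symmetric padding of 1 (padding.size() == kdims):
//   padding_factor = 2 * 1 = 2,  dim_size = 7 + 2 - 3 = 6
// The output length is then derived from dim_size and stride[i].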
......@@ -151,17 +176,13 @@ struct pooling
shape normalize_compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this, true}.has(1);
check_shapes{inputs, *this, true}.has(1).min_ndims(3);
check_attribute_size();
const shape& input = inputs.at(0);
auto padding_size = padding.size();
auto stride_size = stride.size();
size_t kdims = input.ndim() - 2;
if(input.ndim() < 3)
{
MIGRAPHX_THROW("POOLING: input must have 3 or more dimensions and be nonempty");
}
if(input.ndim() * 2 != padding_size + 4 and input.ndim() != padding_size + 2)
if(input.ndim() != stride_size + 2)
{
MIGRAPHX_THROW("POOLING: input and attribute size mismatch!");
}
......@@ -179,6 +200,28 @@ struct pooling
}
return {input.type(), output_dyn_dims};
}
else if(padding_mode != default_)
{
const size_t num_spatial_dims = inputs[0].ndim() - 2;
const shape& x_shape = inputs[0];
// same as convolution::dynamic_compute_shape()
for(std::size_t i = 0; i < num_spatial_dims; ++i)
{
auto ceil_div = [](std::size_t x, std::size_t y) { return (x + y - 1) / y; };
auto s = stride[i];
auto x = x_shape.dyn_dims()[i + 2];
std::set<std::size_t> optimals{};
std::transform(x.optimals.begin(),
x.optimals.end(),
std::inserter(optimals, optimals.begin()),
[&](auto o) { return ceil_div(o, s); });
output_dyn_dims.push_back(
shape::dynamic_dimension{ceil_div(x.min, s), ceil_div(x.max, s), optimals});
}
return {input.type(), output_dyn_dims};
}
else
{
// does not compute optimals
......@@ -267,6 +310,7 @@ struct pooling
Out& output,
const In& input,
const std::vector<std::size_t>& kernel_dims,
const std::vector<std::size_t>& padding_vals,
Op op) const
{
auto in_s = input.get_shape();
......@@ -283,9 +327,9 @@ struct pooling
// For each spatial dimension, find starting and ending index of pooling kernel
for(std::size_t dim = 2; dim < n_dim; ++dim)
{
auto d_2 = dim - 2;
int start =
static_cast<int>(idx_o[dim] * stride[d_2]) - static_cast<int>(padding[d_2]);
auto d_2 = dim - 2;
int start = static_cast<int>(idx_o[dim] * stride[d_2]) -
static_cast<int>(padding_vals[d_2]);
int end;
// NOLINT
if(count_include_pad and ceil_mode and (mode != pooling_mode::max))
......@@ -297,7 +341,7 @@ struct pooling
// Check if this kernel extends beyond the padding at end of dimension
end = std::min(start + kernel_dims[d_2],
in_lens[dim] + static_cast<int>(padding[d_2]));
in_lens[dim] + static_cast<int>(padding_vals[d_2]));
}
else
{
......@@ -316,11 +360,12 @@ struct pooling
}
shape win_shape{output_shape.type(), win_size};
auto pool_size = win_shape.elements();
double output_val = op.template init<Type>();
// for each element in the window...
shape_for_each(win_shape, [&](auto idx_w) {
shape_for_each(win_shape, [&](const auto& idx_w) {
// the coordinates of this element
auto idx = idx_o;
......@@ -354,30 +399,65 @@ struct pooling
argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
{
argument result{dyn_out.computed_shape};
argument result;
auto input_lens = args[0].get_shape().lens();
std::vector<std::size_t> kernel_dims;
shape output_shape;
// If we have to auto-calculate padding, it will be passed to calc_pooling() as an argument
// instead of the member variable padding.
std::vector<std::size_t> temp_padding(padding);
if(dyn_global)
{
// for dynamic GlobalPooling, there's no padding
kernel_dims.insert(kernel_dims.end(), input_lens.begin() + 2, input_lens.end());
output_shape = dyn_out.computed_shape;
result = dyn_out.computed_shape;
}
else
else if((padding_mode != op::padding_mode_t::default_))
{
// If padding_mode is set, the input had a dynamic size; calculate the padded size now.
// kernel_lens is the same as kernel_dims, but prepended with the 2 non-
// spatial dimensions. For size computations, it's used like the weights
// tensor for convolutions.
std::vector<std::size_t> kernel_lens;
kernel_lens.insert(kernel_lens.end(), input_lens.begin(), input_lens.begin() + 2);
kernel_lens.insert(kernel_lens.end(), lengths.begin(), lengths.end());
kernel_dims = this->lengths;
auto type = args[0].get_shape().type();
// dilation not currently supported for pooling, so default to all 1's
temp_padding = calc_dyn_auto_pad(
input_lens, kernel_lens, stride, {1, 1}, bool(padding_mode == op::same_upper));
output_shape = compute_padded_pool_shape(
args[0].get_shape(), shape(type, kernel_dims), temp_padding, stride, {1, 1});
result = argument(output_shape);
}
else // fixed/static input
{
kernel_dims = this->lengths;
output_shape = dyn_out.computed_shape;
result = dyn_out.computed_shape;
}
// Perform the computation and populate result
visit_all(result, args[0])([&](auto output, auto input) {
using type = typename decltype(output)::value_type;
switch(mode)
{
case migraphx::op::pooling_mode::average:
calc_pooling<type>(dyn_out.computed_shape, output, input, kernel_dims, avg_pool{});
calc_pooling<type>(
output_shape, output, input, kernel_dims, temp_padding, avg_pool{});
break;
case migraphx::op::pooling_mode::max:
calc_pooling<type>(dyn_out.computed_shape, output, input, kernel_dims, max_pool{});
calc_pooling<type>(
output_shape, output, input, kernel_dims, temp_padding, max_pool{});
break;
case migraphx::op::pooling_mode::lpnorm:
calc_pooling<type>(
dyn_out.computed_shape, output, input, kernel_dims, lpnorm_pool{lp_order});
output_shape, output, input, kernel_dims, temp_padding, lpnorm_pool{lp_order});
break;
}
});
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
......@@ -21,6 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_SCAN_OP_HPP
#define MIGRAPHX_GUARD_OPERATORS_SCAN_OP_HPP
......@@ -37,6 +38,12 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
/**
* Parent struct for prefix scan operations. A prefix scan is equivalent to the C++
* std::exclusive_scan or std::inclusive_scan. Given a list of numbers, a prefix scan
* sum op returns an equal size list of running totals of the values. Other operations
* besides addition can be supported by their own child ops.
*/
template <class Derived>
struct prefix_scan_op : op_name<Derived>
{
......@@ -60,9 +67,13 @@ struct prefix_scan_op : op_name<Derived>
shape normalize_compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(1);
check_shapes{inputs, *this, true}.has(1);
auto s = inputs.front();
if(s.broadcasted())
if(s.dynamic())
{
return s;
}
else if(s.broadcasted())
{
return {s.type(), s.lens()};
}
......@@ -72,8 +83,9 @@ struct prefix_scan_op : op_name<Derived>
}
}
argument compute(const shape& output_shape, std::vector<argument> args) const
argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
{
shape output_shape(dyn_out.computed_shape);
argument result{output_shape};
auto s = args[0].get_shape();
if(s == output_shape)
......
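A standalone illustration of the prefix-scan semantics described in the comment above, using the standard library rather than the MIGraphX operator:

#include <cassert>
#include <numeric>
#include <vector>

int main()
{
    std::vector<int> in{1, 2, 3, 4};
    std::vector<int> inclusive(in.size()), exclusive(in.size());
    std::inclusive_scan(in.begin(), in.end(), inclusive.begin());    // {1, 3, 6, 10}
    std::exclusive_scan(in.begin(), in.end(), exclusive.begin(), 0); // {0, 1, 3, 6}
    assert(inclusive.back() == 10);
    assert(exclusive.back() == 6);
    return 0;
}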
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_RANDOM_SEED_HPP
#define MIGRAPHX_GUARD_OPERATORS_RANDOM_SEED_HPP
#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <random>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
/**
 * Generates a random seed for use by random number generators. Generating the seed
 * at runtime guarantees a different random sequence on every execution.
 * This operation takes no inputs and outputs an unsigned integer tensor with a
 * single value; the element type is controlled by the dtype attribute.
 */
struct random_seed
{
shape::type_t dtype = shape::type_t::uint64_type;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.dtype, "dtype"));
}
std::string name() const { return "random_seed"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(0);
return shape{dtype};
}
argument compute(const shape& output_shape, const std::vector<argument>&) const
{
argument result(output_shape);
result.visit([&](auto output) { output.front() = std::random_device{}(); });
return result;
}
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* Random Uniform distribution operator. Given a shape, populate it with random
* values. Calls to random_uniform using the same randomization seed as a
* literal input will
* always generate the same pseudo-random sequence.
*
* Inputs: (1) randomization seed (any type is allowed)
* (2) output buffer argument to be populated.
*
* Attributes: none
*
* Output: Returns the buffer from input #2.
*
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_RANDOM_UNIFORM_HPP
#define MIGRAPHX_GUARD_OPERATORS_RANDOM_UNIFORM_HPP
#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <random>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
/**
 * random_uniform populates the passed shape with random numbers from a uniform
 * distribution. The range for floating-point data types is [0, 1);
 * for integer types it is [0, <max value for the type>].
 */
struct random_uniform
{
// The random_uniform operation needs the random number generator seed
// to be passed as a runtime input.
std::string name() const { return "random_uniform"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this, true}.has(2);
return inputs.at(1);
}
argument compute(const shape&, std::vector<argument> args) const
{
// Output goes into the passed buffer, not the shape output.
auto result = args[1];
uint64_t local_seed = args[0].at<uint64_t>(0);
std::mt19937 gen(local_seed);
result.visit([&](auto output) {
using type = typename decltype(output)::value_type;
if constexpr(std::is_integral<type>{})
{
// The default range for all integer types is
// [0, std::uniform_int_distribution<type>::max()].
// Todo: enable different ranges
std::uniform_int_distribution<type> dis;
std::generate(output.begin(), output.end(), [&] { return dis(gen); });
}
else
{
// The default real distribution type is double, with range [0, 1).
std::uniform_real_distribution<> dis;
std::generate(output.begin(), output.end(), [&] { return dis(gen); });
}
});
return result;
}
std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 1; }
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
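A standalone sketch of the reproducibility point in the comments above: a fixed seed always yields the same sequence, while a std::random_device seed (which is what random_seed produces at runtime) differs from run to run.

#include <cstdint>
#include <random>
#include <vector>

std::vector<double> sample_uniform(std::uint64_t seed, std::size_t n)
{
    std::mt19937 gen(seed);
    std::uniform_real_distribution<> dis; // defaults to [0, 1)
    std::vector<double> out(n);
    for(auto& v : out)
        v = dis(gen);
    return out;
}

// sample_uniform(42, 8) == sample_uniform(42, 8)   -> identical sequences
// sample_uniform(std::random_device{}(), 8)        -> varies on every execution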