"src/vscode:/vscode.git/clone" did not exist on "94bba2c108ff87901067355c85d801389484be2a"
Unverified commit 9fee7233, authored by Chris Austen, committed by GitHub

Merge pull request #2019 from ROCmSoftwarePlatform/rel57_workitems

parents 0bc60894 97cc1dfc
@@ -96,7 +96,7 @@ struct allocation_model
{
using std::swap;
auto* derived = this->any_cast<PrivateDetailTypeErasedT>();
- if(derived and private_detail_te_handle_mem_var.unique())
+ if(derived and private_detail_te_handle_mem_var.use_count() == 1)
{
*derived = std::forward<PrivateDetailTypeErasedT>(value);
}
@@ -267,7 +267,7 @@ struct allocation_model
private_detail_te_handle_base_type& private_detail_te_get_handle()
{
assert(private_detail_te_handle_mem_var != nullptr);
- if(not private_detail_te_handle_mem_var.unique())
+ if(private_detail_te_handle_mem_var.use_count() > 1)
private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
return *private_detail_te_handle_mem_var;
}
......
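Note: `std::shared_ptr::unique()` was deprecated in C++17 and removed in C++20, so these type-erased wrappers switch to `use_count() == 1`, which is equivalent for single-threaded use. The same substitution repeats across all the type-erased interface headers below. A minimal sketch of the copy-on-write idiom these hunks implement (the wrapper name is hypothetical, not the project's API):

```cpp
#include <cassert>
#include <memory>
#include <string>

// Hypothetical copy-on-write holder illustrating the pattern in the diff.
struct cow_string
{
    std::shared_ptr<std::string> handle = std::make_shared<std::string>();

    // Before mutating, clone the shared state if anyone else still holds it.
    std::string& mutable_get()
    {
        assert(handle != nullptr);
        if(handle.use_count() > 1) // was handle.unique() before C++20
            handle = std::make_shared<std::string>(*handle);
        return *handle;
    }
};

int main()
{
    cow_string a;
    a.mutable_get() = "hello";
    cow_string b = a;            // shares the handle, use_count() == 2
    b.mutable_get() += " world"; // triggers a deep copy before mutating
    assert(a.mutable_get() == "hello");
}
```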
@@ -90,7 +90,17 @@ struct param
struct returns
{
std::string name() const { return "@return"; }
- shape compute_shape(const std::vector<shape>&) const { return {}; }
+ shape compute_shape(const std::vector<shape>& arg) const
+ {
+     if(arg.empty())
+         return {};
+     else if(arg.size() == 1)
+         return arg[0];
+     else
+         return arg;
+ }
argument compute(context&, const shape&, const std::vector<argument>&) const
{
MIGRAPHX_THROW("builtin");
......
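Note: the `return arg;` branch relies on `shape` being constructible from a vector of sub-shapes, which produces a single tuple-typed shape, so a multi-value `@return` now reports a real output shape instead of an empty one. A sketch, assuming the tuple constructor behaves as in the upstream `shape` header:

```cpp
#include <migraphx/shape.hpp>

int main()
{
    migraphx::shape a{migraphx::shape::float_type, {2, 3}};
    migraphx::shape b{migraphx::shape::int64_type, {4}};
    // Two sub-shapes collapse into one tuple-typed shape, which is what the
    // multi-output branch of compute_shape above returns.
    migraphx::shape t{{a, b}};
}
```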
@@ -88,7 +88,7 @@ struct concat_optimization
{
using std::swap;
auto* derived = this->any_cast<PrivateDetailTypeErasedT>();
- if(derived and private_detail_te_handle_mem_var.unique())
+ if(derived and private_detail_te_handle_mem_var.use_count() == 1)
{
*derived = std::forward<PrivateDetailTypeErasedT>(value);
}
@@ -233,7 +233,7 @@ struct concat_optimization
private_detail_te_handle_base_type& private_detail_te_get_handle()
{
assert(private_detail_te_handle_mem_var != nullptr);
- if(not private_detail_te_handle_mem_var.unique())
+ if(private_detail_te_handle_mem_var.use_count() > 1)
private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
return *private_detail_te_handle_mem_var;
}
......
@@ -118,7 +118,7 @@ struct context
{
using std::swap;
auto* derived = this->any_cast<PrivateDetailTypeErasedT>();
- if(derived and private_detail_te_handle_mem_var.unique())
+ if(derived and private_detail_te_handle_mem_var.use_count() == 1)
{
*derived = std::forward<PrivateDetailTypeErasedT>(value);
}
@@ -373,7 +373,7 @@ struct context
private_detail_te_handle_base_type& private_detail_te_get_handle()
{
assert(private_detail_te_handle_mem_var != nullptr);
- if(not private_detail_te_handle_mem_var.unique())
+ if(private_detail_te_handle_mem_var.use_count() > 1)
private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
return *private_detail_te_handle_mem_var;
}
......
@@ -26,6 +26,7 @@
#include <migraphx/config.hpp>
#include <migraphx/filesystem.hpp>
#include <migraphx/optional.hpp>
+ #include <functional>
#include <memory>
#include <vector>
@@ -43,6 +44,9 @@ struct MIGRAPHX_EXPORT dynamic_loader
return path(reinterpret_cast<void*>(address));
}
static fs::path path(void* address);
+ static optional<dynamic_loader> try_load(const fs::path& p);
+ dynamic_loader() = default;
dynamic_loader(const fs::path& p);
......
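Note: `try_load` gives callers a non-throwing probe that returns an empty `optional` when the library cannot be opened. A hypothetical usage sketch (the plugin name is illustrative, and the header path assumes the upstream layout):

```cpp
#include <migraphx/dynamic_loader.hpp>

void probe()
{
    // Probe for an optional plugin without throwing on failure.
    if(auto lib = migraphx::dynamic_loader::try_load("libsome_plugin.so"))
    {
        // *lib is a usable dynamic_loader
    }
    else
    {
        // fall back: the library is not available on this system
    }
}
```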
@@ -117,14 +117,14 @@ auto generate_tensor_data(const migraphx::shape& s, unsigned long seed = 0)
}
template <class T>
- auto fill_tensor_data(const migraphx::shape& s, unsigned long value = 0)
+ auto fill_tensor_data(const migraphx::shape& s, double value = 0)
{
auto result = make_shared_array<T>(s.element_space());
std::generate(result.get(), result.get() + s.element_space(), [=] { return value; });
return result;
}
- MIGRAPHX_EXPORT argument fill_argument(shape s, unsigned long value = 0);
+ MIGRAPHX_EXPORT argument fill_argument(shape s, double value = 0);
MIGRAPHX_EXPORT argument generate_argument(shape s, unsigned long seed = 0);
......
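Note: widening the fill value from `unsigned long` to `double` lets callers request fractional fills; previously a value such as 0.5 was truncated to 0 at the call boundary. A sketch, assuming these declarations live in `migraphx/generate.hpp` as in the upstream tree:

```cpp
#include <migraphx/generate.hpp>
#include <migraphx/shape.hpp>

int main()
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    // With the double parameter the fractional value survives; the old
    // unsigned long parameter would have filled the tensor with 0.
    auto arg = migraphx::fill_argument(s, 0.5);
}
```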
@@ -80,7 +80,7 @@ struct marker
{
using std::swap;
auto* derived = this->any_cast<PrivateDetailTypeErasedT>();
- if(derived and private_detail_te_handle_mem_var.unique())
+ if(derived and private_detail_te_handle_mem_var.use_count() == 1)
{
*derived = std::forward<PrivateDetailTypeErasedT>(value);
}
@@ -233,7 +233,7 @@ struct marker
private_detail_te_handle_base_type& private_detail_te_get_handle()
{
assert(private_detail_te_handle_mem_var != nullptr);
- if(not private_detail_te_handle_mem_var.unique())
+ if(private_detail_te_handle_mem_var.use_count() > 1)
private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
return *private_detail_te_handle_mem_var;
}
......
@@ -66,7 +66,19 @@ struct convert : unary<convert>
auto type = target_type;
return [type](auto x) {
auto y = x;
- shape::visit(type, [&](auto as) { y = as(x); });
+ shape::visit(type, [&](auto as) {
+     // clamping between the target type's min and max doesn't work for NaNs
+     if(std::isnan(x))
+     {
+         y = as.nan();
+     }
+     else
+     {
+         // clamp overflowing/underflowing values to min()/max() instead of
+         // +/-infinity during downcasting
+         y = std::min(std::max(as(x), as.min()), as.max());
+     }
+ });
return y;
};
}
......
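Note: the new lambda makes downcasts saturate rather than overflow: NaN maps to the target type's NaN value via `as.nan()`, and everything else clamps to [min, max]. The same logic in freestanding C++ for a float to int8 cast (an illustration of the technique, not the project's API):

```cpp
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

// Saturating float -> int8_t conversion mirroring the hunk above: NaN gets a
// fixed value (0 for an integral target), and out-of-range inputs clamp to
// min()/max() instead of invoking undefined behavior in the cast.
std::int8_t saturate_to_int8(float x)
{
    if(std::isnan(x))
        return 0;
    const float lo = std::numeric_limits<std::int8_t>::min(); // -128
    const float hi = std::numeric_limits<std::int8_t>::max(); // 127
    return static_cast<std::int8_t>(std::min(std::max(x, lo), hi));
}

int main()
{
    assert(saturate_to_int8(1000.0f) == 127);
    assert(saturate_to_int8(-1000.0f) == -128);
    assert(saturate_to_int8(std::nanf("")) == 0);
}
```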
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_DIMENSIONS_OF_HPP
#define MIGRAPHX_GUARD_OPERATORS_DIMENSIONS_OF_HPP
#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/dyn_output.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
/**
* Returns the dimensions of the input argument from starting axis to ending axis.
 * At least `end` must be set to use this operator (set `end` to ndim for the default ONNX
 * behavior of the `Shape` operator). This should only be used for dynamic shapes, as it can be
 * simplified to a literal for static shapes.
*/
struct dimensions_of
{
std::size_t start = 0;
std::size_t end = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.start, "start"), f(self.end, "end"));
}
std::string name() const { return "dimensions_of"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this, true}.has(1);
if(start >= end)
{
MIGRAPHX_THROW("DIMENSIONS_OF: start >= end. start = " + std::to_string(start) +
", end = " + std::to_string(end));
}
return shape{shape::int64_type, {end - start}};
}
argument compute(const shape& output_shape, std::vector<argument> args) const
{
argument result{output_shape};
auto input_lens = args[0].get_shape().lens();
result.visit([&](auto output) {
std::copy(input_lens.cbegin() + start, input_lens.cbegin() + end, output.begin());
});
return result;
}
};
} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
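Note: as a worked example of the new operator, an input with runtime dimensions {2, 3, 4, 5} and attributes start=1, end=3 yields the int64 tensor [3, 4]. A hypothetical construction sketch (module plumbing elided; the helper name is illustrative):

```cpp
#include <migraphx/instruction_ref.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/module.hpp>

// Hypothetical sketch: read dimensions [1, 3) of a dynamic-shaped input.
migraphx::instruction_ref add_dims_of(migraphx::module& m, migraphx::instruction_ref input)
{
    // For an input of runtime dimensions {2, 3, 4, 5} this yields [3, 4].
    return m.add_instruction(
        migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), input);
}
```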
@@ -69,7 +69,7 @@ struct multibroadcast
auto make_bcast_strides = [&](std::vector<std::size_t> bcast_lens, std::size_t offset) {
std::vector<size_t> bcast_strides(bcast_lens.size(), 0);
- for(std::ptrdiff_t i = s0.lens().size() - 1; i >= 0; i--)
+ for(std::ptrdiff_t i = s0.ndim() - 1; i >= 0; i--)
{
if(bcast_lens[i + offset] == s0.lens()[i])
{
@@ -84,13 +84,13 @@ struct multibroadcast
if(s0.dynamic())
MIGRAPHX_THROW(
"MULTIBROADCAST: Single dynamic input shape not supported. Use two inputs.");
- if(s0.lens().size() > output_lens.size())
+ if(s0.ndim() > output_lens.size())
{
MIGRAPHX_THROW("MULTIBROADCAST: input dimensions should <= output size");
}
- auto offset = output_lens.size() - s0.lens().size();
- for(std::ptrdiff_t i = s0.lens().size() - 1; i >= 0; i--)
+ auto offset = output_lens.size() - s0.ndim();
+ for(std::ptrdiff_t i = s0.ndim() - 1; i >= 0; i--)
{
if(output_lens[i + offset] != s0.lens()[i] and s0.lens()[i] != 1)
{
@@ -119,7 +119,7 @@ struct multibroadcast
{
// output_lens will not be set for 2+ input version
auto bcast_lens = compute_common_lens(inputs);
- auto offset = bcast_lens.size() - s0.lens().size();
+ auto offset = bcast_lens.size() - s0.ndim();
auto bcast_strides = make_bcast_strides(bcast_lens, offset);
return {t, std::move(bcast_lens), std::move(bcast_strides)};
}
......
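Note: the `ndim()` substitutions are behavior-preserving for static shapes (`ndim()` equals `lens().size()`) and also make sense for dynamic ones. The stride trick being preserved: input dims are right-aligned against the output, and a broadcast (size-1) dimension gets stride 0 so every output index maps to the same element. A standalone sketch, assuming NumPy-style broadcasting rules:

```cpp
#include <cstddef>
#include <vector>

// Right-align the input dims against the output dims; size-1 (broadcast)
// dimensions keep stride 0 so all output indices map to the same element.
std::vector<std::size_t> broadcast_strides(const std::vector<std::size_t>& in_lens,
                                           const std::vector<std::size_t>& in_strides,
                                           const std::vector<std::size_t>& out_lens)
{
    std::vector<std::size_t> result(out_lens.size(), 0);
    const std::size_t offset = out_lens.size() - in_lens.size();
    for(std::ptrdiff_t i = in_lens.size() - 1; i >= 0; i--)
    {
        if(out_lens[i + offset] == in_lens[i])
            result[i + offset] = in_strides[i];
        // else in_lens[i] == 1: leave the stride at 0 to broadcast
    }
    return result;
}

// e.g. broadcast_strides({3, 1}, {1, 1}, {2, 3, 4}) returns {0, 1, 0}
```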
/*
* The MIT License (MIT)
*
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -21,6 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_OPERATORS_SCAN_OP_HPP
#define MIGRAPHX_GUARD_OPERATORS_SCAN_OP_HPP
@@ -37,6 +38,12 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {
+ /**
+  * Parent struct for prefix scan operations. A prefix scan is equivalent to the C++
+  * std::exclusive_scan or std::inclusive_scan. Given a list of numbers, a prefix-scan
+  * sum op returns an equal-sized list of running totals of the values. Other operations
+  * besides addition can be supported by their own child ops.
+  */
template <class Derived>
struct prefix_scan_op : op_name<Derived>
{
@@ -60,9 +67,13 @@ struct prefix_scan_op : op_name<Derived>
shape normalize_compute_shape(std::vector<shape> inputs) const
{
- check_shapes{inputs, *this}.has(1);
+ check_shapes{inputs, *this, true}.has(1);
auto s = inputs.front();
- if(s.broadcasted())
+ if(s.dynamic())
+ {
+     return s;
+ }
+ else if(s.broadcasted())
{
return {s.type(), s.lens()};
}
@@ -72,8 +83,9 @@ struct prefix_scan_op : op_name<Derived>
}
}
- argument compute(const shape& output_shape, std::vector<argument> args) const
+ argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
{
+ shape output_shape(dyn_out.computed_shape);
argument result{output_shape};
auto s = args[0].get_shape();
if(s == output_shape)
......
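Note: the doc comment's claim can be checked directly against the standard library; a prefix-scan sum returns running totals of equal length:

```cpp
#include <cassert>
#include <numeric>
#include <vector>

int main()
{
    // Inclusive prefix-scan sum over {1, 2, 3, 4}: each output element is
    // the running total up to and including that position.
    std::vector<int> v{1, 2, 3, 4};
    std::vector<int> out(v.size());
    std::inclusive_scan(v.begin(), v.end(), out.begin());
    assert((out == std::vector<int>{1, 3, 6, 10}));
}
```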
@@ -251,9 +251,10 @@ auto compute_op(rank<1>,
const shape& output,
const std::vector<argument>& inputs,
const std::vector<module_ref>& module_args,
- F f)
- -> decltype(
-     x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f))
+ F f) -> decltype(x.compute(make_compute_output_shape(pack(x, output, inputs)),
+                            inputs,
+                            module_args,
+                            f))
{
return x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f);
}
@@ -309,9 +310,10 @@ auto compute_op(rank<3>,
const shape& output,
const std::vector<argument>& inputs,
const std::vector<module_ref>& module_args,
- F f)
- -> decltype(
-     x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f))
+ F f) -> decltype(x.compute(make_compute_output_shape(pack(x, output, inputs)),
+                            inputs,
+                            module_args,
+                            f))
{
return x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f);
}
@@ -573,7 +575,7 @@ struct operation
{
using std::swap;
auto* derived = this->any_cast<PrivateDetailTypeErasedT>();
- if(derived and private_detail_te_handle_mem_var.unique())
+ if(derived and private_detail_te_handle_mem_var.use_count() == 1)
{
*derived = std::forward<PrivateDetailTypeErasedT>(value);
}
@@ -1263,7 +1265,7 @@ struct operation
private_detail_te_handle_base_type& private_detail_te_get_handle()
{
assert(private_detail_te_handle_mem_var != nullptr);
- if(not private_detail_te_handle_mem_var.unique())
+ if(private_detail_te_handle_mem_var.use_count() > 1)
private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
return *private_detail_te_handle_mem_var;
}
......
@@ -48,6 +48,7 @@
#include <migraphx/op/convolution_backwards.hpp>
#include <migraphx/op/cosh.hpp>
#include <migraphx/op/cos.hpp>
+ #include <migraphx/op/dimensions_of.hpp>
#include <migraphx/op/div.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/elu.hpp>
......
@@ -116,7 +116,7 @@ struct pass
{
using std::swap;
auto* derived = this->any_cast<PrivateDetailTypeErasedT>();
- if(derived and private_detail_te_handle_mem_var.unique())
+ if(derived and private_detail_te_handle_mem_var.use_count() == 1)
{
*derived = std::forward<PrivateDetailTypeErasedT>(value);
}
@@ -292,7 +292,7 @@ struct pass
private_detail_te_handle_base_type& private_detail_te_get_handle()
{
assert(private_detail_te_handle_mem_var != nullptr);
- if(not private_detail_te_handle_mem_var.unique())
+ if(private_detail_te_handle_mem_var.use_count() > 1)
private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
return *private_detail_te_handle_mem_var;
}
......
@@ -66,6 +66,10 @@ MIGRAPHX_EXPORT std::vector<int64_t> invert_permutation(const std::vector<int64_
MIGRAPHX_EXPORT std::vector<int64_t> find_permutation(const shape& s);
MIGRAPHX_EXPORT std::vector<int64_t> find_permutation(const std::vector<shape>& shapes);
+ /// Normalize the shapes so that the dimension order matches their in-memory
+ /// order as much as possible.
+ MIGRAPHX_EXPORT std::vector<shape> normalize_permutation(const std::vector<shape>& shapes);
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......
@@ -99,7 +99,7 @@ struct schedule_model
{
using std::swap;
auto* derived = this->any_cast<PrivateDetailTypeErasedT>();
- if(derived and private_detail_te_handle_mem_var.unique())
+ if(derived and private_detail_te_handle_mem_var.use_count() == 1)
{
*derived = std::forward<PrivateDetailTypeErasedT>(value);
}
@@ -274,7 +274,7 @@ struct schedule_model
private_detail_te_handle_base_type& private_detail_te_get_handle()
{
assert(private_detail_te_handle_mem_var != nullptr);
- if(not private_detail_te_handle_mem_var.unique())
+ if(private_detail_te_handle_mem_var.use_count() > 1)
private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
return *private_detail_te_handle_mem_var;
}
......
/*
* The MIT License (MIT)
*
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -240,6 +240,10 @@ struct MIGRAPHX_EXPORT shape
template <class Iterator>
std::size_t index(Iterator start, Iterator last) const
{
+ if(this->dynamic())
+ {
+     MIGRAPHX_THROW("SHAPE: index() called on dynamic shape");
+ }
assert(std::distance(start, last) <= this->lens().size());
assert(this->lens().size() == this->strides().size());
return std::inner_product(start, last, this->strides().begin(), std::size_t{0}); // NOLINT
......
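Note: the guard is needed because a dynamic shape has no strides to fold over. For static shapes, `index()` is simply the inner product of a multi-index with the strides:

```cpp
#include <cassert>
#include <cstddef>
#include <numeric>
#include <vector>

int main()
{
    // A row-major {2, 3, 4} shape has strides {12, 4, 1}; the flat offset of
    // multi-index {1, 2, 3} is 1*12 + 2*4 + 3*1 = 23, exactly the inner
    // product computed by shape::index above.
    std::vector<std::size_t> idx{1, 2, 3};
    std::vector<std::size_t> strides{12, 4, 1};
    std::size_t flat =
        std::inner_product(idx.begin(), idx.end(), strides.begin(), std::size_t{0});
    assert(flat == 23);
}
```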
@@ -100,7 +100,7 @@ struct stream_model
{
using std::swap;
auto* derived = this->any_cast<PrivateDetailTypeErasedT>();
- if(derived and private_detail_te_handle_mem_var.unique())
+ if(derived and private_detail_te_handle_mem_var.use_count() == 1)
{
*derived = std::forward<PrivateDetailTypeErasedT>(value);
}
@@ -288,7 +288,7 @@ struct stream_model
private_detail_te_handle_base_type& private_detail_te_get_handle()
{
assert(private_detail_te_handle_mem_var != nullptr);
- if(not private_detail_te_handle_mem_var.unique())
+ if(private_detail_te_handle_mem_var.use_count() > 1)
private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
return *private_detail_te_handle_mem_var;
}
......
@@ -167,7 +167,7 @@ struct target
{
using std::swap;
auto* derived = this->any_cast<PrivateDetailTypeErasedT>();
- if(derived and private_detail_te_handle_mem_var.unique())
+ if(derived and private_detail_te_handle_mem_var.use_count() == 1)
{
*derived = std::forward<PrivateDetailTypeErasedT>(value);
}
@@ -428,7 +428,7 @@ struct target
private_detail_te_handle_base_type& private_detail_te_get_handle()
{
assert(private_detail_te_handle_mem_var != nullptr);
- if(not private_detail_te_handle_mem_var.unique())
+ if(private_detail_te_handle_mem_var.use_count() > 1)
private_detail_te_handle_mem_var = private_detail_te_handle_mem_var->clone();
return *private_detail_te_handle_mem_var;
}
......
@@ -64,10 +64,7 @@ void instruction::replace(const shape& r)
result = r;
for(auto&& ins : output)
{
if(ins->name() == "@return")
continue;
assert(ins->name().front() != '@');
assert(ins->name() == "@return" or ins->name().front() != '@');
ins->recompute_shape();
}
}
@@ -122,10 +119,6 @@ bool instruction::valid() const
{
computed = result;
}
else if(op.name() == "@return")
{
computed = {};
}
else
{
try
@@ -145,6 +138,7 @@
}
shape instruction::get_shape() const { return result; }
const literal& instruction::get_literal() const
{
assert(op.name() == "@literal");
......