Commit 7702c20d authored by Paul

Merge

parents c362e7fa 9afce86d
@@ -26,6 +26,7 @@
#include <migraphx/stringutils.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/permutation.hpp>
#include <migraphx/ranges.hpp>
#include <numeric>
#include <algorithm>
#include <functional>
@@ -65,13 +66,21 @@ struct shape_impl
std::is_sorted(m_strides.rbegin(), m_strides.rend());
}
shape_impl(shape::type_t t, std::vector<shape::dynamic_dimension> dims)
: m_type(t), m_dyn_dims(std::move(dims))
{
}
shape_impl(const std::vector<shape>& subs) : m_type(shape::tuple_type), m_shapes(subs) {}
shape::type_t m_type;
std::vector<std::size_t> m_lens = {};
std::vector<std::size_t> m_strides = {};
std::vector<shape> m_shapes = {};
bool m_standard = false;
std::vector<shape::dynamic_dimension> m_dyn_dims = {};
void calculate_strides()
{
m_strides.clear();
@@ -87,6 +96,12 @@ struct shape_impl
std::size_t element_space() const
{
if(not m_dyn_dims.empty())
{
auto maxes = max_lens();
return std::accumulate(maxes.begin(), maxes.end(), std::size_t{1}, std::multiplies<>());
}
assert(m_lens.size() == m_strides.size());
if(m_lens.empty())
return 0;
@@ -101,6 +116,11 @@ struct shape_impl
std::size_t elements() const
{
if(not m_dyn_dims.empty())
{
MIGRAPHX_THROW("SHAPE: elements() called on dynamic shape");
}
assert(m_lens.size() == m_strides.size());
if(m_lens.empty())
return 0;
@@ -108,6 +128,35 @@ struct shape_impl
m_lens.begin(), m_lens.end(), std::size_t{1}, std::multiplies<std::size_t>());
}
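// Helpers for dynamic shapes: per-dimension minimum, maximum, and optimal lengths.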
std::vector<std::size_t> min_lens() const
{
std::vector<std::size_t> ret(m_dyn_dims.size());
std::transform(m_dyn_dims.cbegin(),
m_dyn_dims.cend(),
ret.begin(),
[](shape::dynamic_dimension x) { return x.min; });
return ret;
}
std::vector<std::size_t> max_lens() const
{
std::vector<std::size_t> ret(m_dyn_dims.size());
std::transform(m_dyn_dims.cbegin(),
m_dyn_dims.cend(),
ret.begin(),
[](shape::dynamic_dimension x) { return x.max; });
return ret;
}
std::vector<std::size_t> opt_lens() const
{
std::vector<std::size_t> ret(m_dyn_dims.size());
std::transform(m_dyn_dims.cbegin(),
m_dyn_dims.cend(),
ret.begin(),
[](shape::dynamic_dimension x) { return x.opt; });
return ret;
}
// Does the shape skip over elements?
bool skips() const
{
@@ -165,6 +214,16 @@ shape::shape(type_t t, std::vector<std::size_t> l, std::vector<std::size_t> s)
{
}
shape::shape(type_t t, std::initializer_list<std::size_t> d)
: shape::shape(t, std::vector<std::size_t>{d.begin(), d.end()})
{
}
shape::shape(type_t t, std::vector<shape::dynamic_dimension> dims)
: impl(std::make_shared<shape_impl>(t, std::move(dims)))
{
}
shape::shape(const std::vector<shape>& subs) : impl(std::make_shared<shape_impl>(subs)) {}
shape::shape(std::shared_ptr<shape_impl> pimpl) : impl(std::move(pimpl)) {}
@@ -180,9 +239,13 @@ shape shape::from_permutation(type_t t,
}
shape::type_t shape::type() const { return impl->m_type; }
const std::vector<std::size_t>& shape::lens() const { return impl->m_lens; }
const std::vector<std::size_t>& shape::strides() const { return impl->m_strides; }
std::size_t shape::elements() const { return impl->elements(); }
std::size_t shape::bytes() const
{
if(this->sub_shapes().empty())
@@ -199,6 +262,7 @@ std::size_t shape::bytes() const
[&](auto x, auto y) { return x + y.bytes(); });
}
}
std::size_t shape::type_size() const
{
std::size_t n = 0;
@@ -206,20 +270,35 @@ std::size_t shape::type_size() const
this->visit_type([&](auto as) { n = as.size(); });
return n;
}
std::size_t shape::index(std::initializer_list<std::size_t> l) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: index() called on dynamic shape");
}
assert(l.size() <= this->lens().size());
assert(this->lens().size() == this->strides().size());
return std::inner_product(l.begin(), l.end(), this->strides().begin(), std::size_t{0});
}
std::size_t shape::index(const std::vector<std::size_t>& l) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: index() called on dynamic shape");
}
assert(l.size() <= this->lens().size());
assert(this->lens().size() == this->strides().size());
return std::inner_product(l.begin(), l.end(), this->strides().begin(), std::size_t{0});
}
std::size_t shape::index(std::size_t i) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: index() called on dynamic shape");
}
assert(this->lens().size() == this->strides().size());
if(this->standard())
return i;
@@ -267,12 +346,20 @@ void shape::multi_copy(std::size_t i, std::size_t* start, const std::size_t* end
bool shape::packed() const
{
if(this->dynamic())
{
return false;
}
return this->sub_shapes().empty() and not impl->skips() and
this->elements() == this->element_space();
}
bool shape::transposed() const
{
if(this->dynamic())
{
return false;
}
if(this->broadcasted())
{
// TODO: Use a filter_iterator instead
@@ -292,6 +379,10 @@ bool shape::transposed() const
bool shape::broadcasted() const
{
if(this->dynamic())
{
return false;
}
assert(this->lens().size() == this->strides().size());
return std::any_of(
this->strides().begin(), this->strides().end(), [](auto x) { return x == 0; });
@@ -299,6 +390,10 @@ bool shape::broadcasted() const
bool shape::scalar() const
{
if(this->dynamic())
{
return false;
}
assert(this->lens().size() == this->strides().size());
// if any stride > 0, then accumulate will return false
return this->sub_shapes().empty() and
@@ -317,6 +412,10 @@ shape shape::normalize_standard() const
shape shape::with_lens(type_t t, const std::vector<std::size_t>& l) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: with_lens() called on dynamic shape");
}
assert(l.size() == this->lens().size());
auto perm = find_permutation(*this);
return shape::from_permutation(t, l, perm);
@@ -324,6 +423,10 @@ shape shape::with_lens(type_t t, const std::vector<std::size_t>& l) const
shape shape::with_lens(const std::vector<std::size_t>& l) const
{
if(this->dynamic())
{
MIGRAPHX_THROW("SHAPE: with_lens() called on dynamic shape");
}
return this->with_lens(this->type(), l);
}
@@ -338,20 +441,80 @@ std::size_t shape::element_space() const { return impl->element_space(); }
std::string shape::type_string() const { return name(this->type()); }
bool shape::dynamic() const { return not impl->m_dyn_dims.empty(); }
const std::vector<shape::dynamic_dimension>& shape::dyn_dims() const { return impl->m_dyn_dims; }
std::vector<std::size_t> shape::min_lens() const
{
return this->dynamic() ? impl->min_lens() : this->lens();
}
std::vector<std::size_t> shape::max_lens() const
{
return this->dynamic() ? impl->max_lens() : this->lens();
}
std::vector<std::size_t> shape::opt_lens() const
{
return this->dynamic() ? impl->opt_lens() : this->lens();
}
bool shape::dynamic_dimension::is_fixed() const { return this->min == this->max; }
bool shape::dynamic_dimension::has_optimal() const { return opt != 0; }
template <class Self, class F>
auto shape::dynamic_dimension::reflect(Self& self, F f)
{
return pack(f(self.min, "min"), f(self.max, "max"), f(self.opt, "opt"));
}
bool operator==(const shape::dynamic_dimension& x, const shape::dynamic_dimension& y)
{
return (x.min == y.min and x.max == y.max and x.opt == y.opt);
}
bool operator!=(const shape::dynamic_dimension& x, const shape::dynamic_dimension& y)
{
return !(x == y);
}
std::ostream& operator<<(std::ostream& os, const shape::dynamic_dimension& x)
{
os << "[" << x.min << ", " << x.max << ", " << x.opt << "]";
return os;
}
bool operator==(const shape& x, const shape& y)
{
if(x.dynamic() and y.dynamic())
{
return x.impl == y.impl or (x.type() == y.type() and x.dyn_dims() == y.dyn_dims() and
x.sub_shapes() == y.sub_shapes());
}
return x.impl == y.impl or
(x.dynamic() == y.dynamic() and x.type() == y.type() and x.lens() == y.lens() and
x.strides() == y.strides() and x.sub_shapes() == y.sub_shapes());
}
bool operator!=(const shape& x, const shape& y) { return !(x == y); }
std::ostream& operator<<(std::ostream& os, const shape& x)
{
if(x.sub_shapes().empty())
{
if(x.dynamic())
{
os << "dynamic, ";
os << x.type_string() << ", ";
os << "{" << to_string_range(x.dyn_dims()) << "}";
}
else
{
os << x.type_string() << ", ";
os << "{" << to_string_range(x.lens()) << "}, ";
os << "{" << to_string_range(x.strides()) << "}";
}
}
else
{
@@ -375,12 +538,14 @@ const std::vector<shape>& shape::sub_shapes() const { return impl->m_shapes; }
void migraphx_to_value(value& v, const shape& s)
{
value result;
result["type"] = migraphx::to_value(s.type_string()); result["type"] = migraphx::to_value(s.type_string());
result["lens"] = migraphx::to_value(s.lens()); result["lens"] = migraphx::to_value(s.lens());
result["strides"] = migraphx::to_value(s.strides()); result["strides"] = migraphx::to_value(s.strides());
result["sub_shapes"] = migraphx::to_value(s.sub_shapes()); result["sub_shapes"] = migraphx::to_value(s.sub_shapes());
v = result; result["dynamic_dimensions"] = migraphx::to_value(s.dyn_dims());
v = result;
} }
void migraphx_from_value(const value& v, shape& s)
{
auto t = v.at("type").get_string();
@@ -390,9 +555,25 @@ void migraphx_from_value(const value& v, shape& s)
}
else
{
if(v.at("dynamic_dimensions").empty())
{
s = shape{shape::parse_type(t),
v.at("lens").to_vector<std::size_t>(),
v.at("strides").to_vector<std::size_t>()};
}
else
{
auto v_dd = v.at("dynamic_dimensions");
std::vector<shape::dynamic_dimension> dyn_dims(v.at("dynamic_dimensions").size());
std::transform(v_dd.begin(), v_dd.end(), dyn_dims.begin(), [](migraphx::value x) {
auto x_min = x.at("min").template to<size_t>();
auto x_max = x.at("max").template to<size_t>();
auto x_opt = x.at("opt").template to<size_t>();
return shape::dynamic_dimension{x_min, x_max, x_opt};
});
s = shape{shape::parse_type(t), dyn_dims};
}
}
}
...
@@ -151,8 +151,11 @@ struct find_transpose
{
auto matcher() const
{
return match::name("transpose")(match::none_of( auto output_not_transpose =
match::skip_output(match::name("contiguous"))(match::name("transpose")))); match::none_of(match::skip_output(match::name("contiguous"))(match::name("transpose")));
auto input_has_transpose =
match::args(match::skip(match::name("contiguous"))(match::name("transpose")));
return match::name("transpose")(output_not_transpose, input_has_transpose);
} }
void apply(module& m, const match::matcher_result& mr) const
@@ -272,7 +275,7 @@ struct find_concat_transpose
{
auto matcher() const
{
return match::name("concat")(match::all_of[match::inputs()](match::transpose_shape())); return match::name("concat")(match::all_of[match::inputs()](match::name("transpose")));
} }
void apply(module& m, const match::matcher_result& mr) const void apply(module& m, const match::matcher_result& mr) const
...@@ -601,9 +604,157 @@ struct find_transpose_contiguous_reshaper_unary ...@@ -601,9 +604,157 @@ struct find_transpose_contiguous_reshaper_unary
} }
}; };
struct find_slice_transpose
{
auto matcher() const
{
return match::any(match::any_of[match::outputs()](
match::name("slice")(match::output(match::name("transpose")))));
}
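// Returns the permutation that occurs most often among the given transpose instructions.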
static std::vector<int64_t> find_common_perm(const std::vector<instruction_ref>& transposes)
{
std::map<std::vector<int64_t>, int64_t> count;
for(auto t : transposes)
{
auto perm = t->get_operator().to_value()["permutation"].to_vector<int64_t>();
count[perm]++;
}
return std::max_element(
count.begin(), count.end(), by(std::less<>{}, [](auto&& p) { return p.second; }))
->first;
}
void apply(module& m, const match::matcher_result& r) const
{
auto ins = r.result;
std::vector<instruction_ref> splits;
std::copy_if(ins->outputs().begin(),
ins->outputs().end(),
std::back_inserter(splits),
[&](instruction_ref out) {
return out->name() == "slice" and out->outputs().size() == 1 and
out->outputs().front()->name() == "transpose";
});
if(splits.size() < 2)
return;
std::vector<instruction_ref> transposes;
std::transform(splits.begin(),
splits.end(),
std::back_inserter(transposes),
[](auto split) { return split->outputs().front(); });
auto perm = find_common_perm(transposes);
auto iperm = invert_permutation(perm);
auto pre = m.insert_instruction(
std::next(ins), make_op("transpose", {{"permutation", perm}}), ins);
for(auto i : range(transposes.size()))
{
auto split = splits[i];
auto t = transposes[i];
auto op = any_cast<op::slice>(split->get_operator());
std::transform(op.axes.begin(), op.axes.end(), op.axes.begin(), [&](auto axis) {
return iperm[axis];
});
auto new_ins = m.insert_instruction(t, op, pre);
if(t->get_operator() != pre->get_operator())
{
auto curr = t->get_operator().to_value()["permutation"].to_vector<int64_t>();
new_ins = m.insert_instruction(
t, make_op("transpose", {{"permutation", reorder_dims(iperm, curr)}}), new_ins);
}
m.replace_instruction(t, new_ins);
}
}
};
struct find_transpose_slice
{
auto matcher() const
{
return match::name("transpose")(match::all_of[match::outputs()](match::name("slice")));
}
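// Distance covered by the slice along each sliced axis: ends - starts, elementwise.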
static std::vector<int64_t> slice_distance(const op::slice& op)
{
assert(op.starts.size() == op.ends.size());
std::vector<int64_t> result(op.starts.size());
std::transform(
op.ends.begin(), op.ends.end(), op.starts.begin(), result.begin(), std::minus<>{});
return result;
}
void apply(module& m, const match::matcher_result& r) const
{
auto ins = r.result;
auto slices = ins->outputs();
if(slices.empty())
return;
auto slice = any_cast<op::slice>(slices.front()->get_operator());
auto sdistance = slice_distance(slice);
// Check all distances and axes are the same
if(std::any_of(slices.begin(), slices.end(), [&](auto sins) {
auto s = any_cast<op::slice>(sins->get_operator());
return s.axes != slice.axes or slice_distance(s) != sdistance;
}))
return;
// Check distances are divisible by lens of corresponding axes
auto mod_by_distance = [&](const auto& v, auto f) {
return std::inner_product(v.begin(),
v.end(),
sdistance.begin(),
0,
std::plus<>{},
[&](auto x, auto d) -> uint64_t {
if(d == 0)
return 1;
return f(x) % d;
});
};
if(mod_by_distance(slice.axes, [&](auto x) { return ins->get_shape().lens()[x]; }) != 0 or
mod_by_distance(slice.starts, id{}) != 0 or mod_by_distance(slice.ends, id{}) != 0)
return;
// TODO: Handle multiple axes
if(sdistance.size() != 1)
return;
auto axis = slice.axes.front();
// Skip if axis would be packed
if(std::all_of(ins->get_shape().lens().begin(),
ins->get_shape().lens().begin() + axis,
[](auto x) { return x == 1; }))
return;
// Compute axis before transpose to use for unsqueeze
auto perm = ins->get_operator().to_value()["permutation"].to_vector<int64_t>();
auto preaxis = std::find(perm.begin(), perm.end(), axis) - perm.begin();
// Make unsqueeze
auto unsqueeze = m.insert_instruction(
ins, make_op("unsqueeze", {{"axes", {preaxis}}, {"steps", sdistance}}), ins->inputs());
// Make transpose
std::transform(perm.begin(), perm.end(), perm.begin(), [&](auto i) {
if(i > preaxis)
return i + 1;
return i;
});
perm.insert(perm.begin(), preaxis + 1);
auto transpose =
m.insert_instruction(ins, make_op("transpose", {{"permutation", perm}}), unsqueeze);
// Slice and squeeze
for(auto s : slices)
{
auto op = any_cast<op::slice>(s->get_operator());
op.axes = {0};
op.starts = {op.starts.front() / sdistance.front()};
op.ends = {op.ends.front() / sdistance.front()};
auto slice_ins = m.insert_instruction(ins, op, transpose);
auto squeeze =
m.insert_instruction(ins, make_op("squeeze", {{"axes", {0}}}), slice_ins);
m.replace_instruction(s, squeeze);
}
}
};
void simplify_reshapes::apply(module& m) const
{
for(int i = 0; i < 4; i++)
{
match::find_matches(m,
find_where_op{},
@@ -616,6 +767,8 @@ void simplify_reshapes::apply(module& m) const
find_nested_convert{},
find_nested_slice{},
find_nested_concat{},
find_transpose_slice{},
find_slice_transpose{},
find_transpose_contiguous_reshaper_unary{});
dead_code_elimination{}.apply(m);
}
...
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/sqlite.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/errors.hpp>
#include <sqlite3.h>
#include <algorithm>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
using sqlite3_ptr = MIGRAPHX_MANAGE_PTR(sqlite3*, sqlite3_close);
struct sqlite_impl
{
sqlite3* get() const { return ptr.get(); }
void open(const fs::path& p, int flags)
{
sqlite3* ptr_tmp = nullptr;
int rc = sqlite3_open_v2(p.string().c_str(), &ptr_tmp, flags, nullptr);
ptr = sqlite3_ptr{ptr_tmp};
if(rc != 0)
MIGRAPHX_THROW("error opening " + p.string() + ": " + error_message());
}
template <class F>
void exec(const char* sql, F f)
{
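// sqlite3_exec takes a plain C callback; this lambda forwards each result row to the
// functor passed through the void* user-data pointer and maps exceptions to an error code.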
auto callback = [](void* obj, auto... xs) -> int {
try
{
const auto* g = static_cast<const F*>(obj);
(*g)(xs...);
return 0;
}
catch(...)
{
return -1;
}
};
int rc = sqlite3_exec(get(), sql, callback, &f, nullptr);
if(rc != 0)
MIGRAPHX_THROW(error_message());
}
std::string error_message() const
{
std::string msg = "sqlite3: ";
return msg + sqlite3_errmsg(get());
}
sqlite3_ptr ptr;
};
sqlite sqlite::read(const fs::path& p)
{
sqlite r;
r.impl = std::make_shared<sqlite_impl>();
r.impl->open(p, SQLITE_OPEN_READONLY);
return r;
}
sqlite sqlite::write(const fs::path& p)
{
sqlite r;
r.impl = std::make_shared<sqlite_impl>();
// Using '+' instead of bitwise '|' to avoid compilation warning
r.impl->open(p, SQLITE_OPEN_READWRITE + SQLITE_OPEN_CREATE);
return r;
}
std::vector<std::unordered_map<std::string, std::string>> sqlite::execute(const std::string& s)
{
std::vector<std::unordered_map<std::string, std::string>> result;
impl->exec(s.c_str(), [&](int n, char** texts, char** names) {
std::unordered_map<std::string, std::string> row;
row.reserve(n);
std::transform(
names,
names + n,
texts,
std::inserter(row, row.begin()),
[&](const char* name, const char* text) { return std::make_pair(name, text); });
result.push_back(row);
});
return result;
}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/target_assignments.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
void target_assignments::add_assignment(instruction_ref ins, const std::string& target)
{
assignments.emplace(ins, target);
}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
@@ -25,6 +25,7 @@
#include <migraphx/module.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/register_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
@@ -52,6 +53,7 @@ struct cpu_literal
return os;
}
};
MIGRAPHX_REGISTER_OP(cpu_literal);
void write_literals::apply(module& m) const
{
...
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
add_library(migraphx_fpga
target.cpp
lowering.cpp
subgraph.cpp
vitis_ai_adapter.cpp
)
set_target_properties(migraphx_fpga PROPERTIES EXPORT_NAME fpga)
rocm_set_soversion(migraphx_fpga ${MIGRAPHX_SO_VERSION})
rocm_clang_tidy_check(migraphx_fpga)
target_link_libraries(migraphx_fpga migraphx)
rocm_install_targets(
TARGETS migraphx_fpga
INCLUDE
${CMAKE_CURRENT_SOURCE_DIR}/include
)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_FPGA_CONTEXT_HPP
#define MIGRAPHX_GUARD_FPGA_CONTEXT_HPP
#include <migraphx/config.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace fpga {
struct context
{
int id = 0;
void finish() const {}
};
} // namespace fpga
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif // MIGRAPHX_GUARD_FPGA_CONTEXT_HPP
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_FPGA_LOWERING_HPP
#define MIGRAPHX_GUARD_FPGA_LOWERING_HPP
#include <migraphx/program.hpp>
#include <migraphx/config.hpp>
#include <migraphx/fpga/context.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace fpga {
struct lowering
{
context* ctx = nullptr;
std::string name() const { return "fpga::lowering"; }
void apply(module& m) const;
};
} // namespace fpga
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif // MIGRAPHX_GUARD_FPGA_LOWERING_HPP
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_FPGA_SUBGRAPH_HPP
#define MIGRAPHX_GUARD_FPGA_SUBGRAPH_HPP
#include <migraphx/program.hpp>
#include <migraphx/config.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace fpga {
struct subgraph
{
std::string name() const { return "fpga::subgraph"; }
void apply(module_pass_manager& mpm) const;
};
} // namespace fpga
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif // MIGRAPHX_GUARD_FPGA_SUBGRAPH_HPP
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_FPGA_TARGET_HPP
#define MIGRAPHX_GUARD_FPGA_TARGET_HPP
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/compile_options.hpp>
#include <migraphx/fpga/context.hpp>
#include <migraphx/config.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
struct pass;
namespace fpga {
struct target
{
std::string name() const;
std::vector<pass> get_passes(migraphx::context& ctx, const compile_options&) const;
migraphx::context get_context() const { return context{}; }
float is_supported(instruction_ref ins, support_metric m);
argument copy_to(const argument& arg) const { return arg; }
argument copy_from(const argument& arg) const { return arg; }
argument allocate(const shape& s) const;
};
MIGRAPHX_REGISTER_TARGET(target);
} // namespace fpga
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif // MIGRAPHX_GUARD_FPGA_TARGET_HPP
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_FPGA_VITIS_AI_ADAPTER_HPP
#define MIGRAPHX_GUARD_FPGA_VITIS_AI_ADAPTER_HPP
#include <string>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
namespace vitis_ai {
class x_model
{
migraphx::shape shape;
public:
migraphx::shape get_shape() const;
void set_shape(migraphx::shape);
};
x_model create_xmodel(migraphx::module_ref mod);
migraphx::argument execute(const x_model& xmodel,
const migraphx::shape& output_shape,
std::vector<migraphx::argument>& args);
} // namespace vitis_ai
#endif // MIGRAPHX_GUARD_FPGA_VITIS_AI_ADAPTER_HPP
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/fpga/lowering.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/register_op.hpp>
#include <migraphx/stringutils.hpp>
#include <iostream>
#include "migraphx/fpga/vitis_ai_adapter.hpp"
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace fpga {
struct fpga_vitis_op
{
fpga_vitis_op() = default;
explicit fpga_vitis_op(vitis_ai::x_model model) : xmodel(std::move(model)){};
vitis_ai::x_model xmodel;
int dummy = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
// return pack(f(self.xmodel, "xmodel"));
return pack(f(self.dummy, "dummy"));
}
std::string name() const { return "fpga::vitis_ai"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
(void)inputs;
return xmodel.get_shape();
}
argument
compute(const context& ctx, const shape& output_shape, std::vector<argument> args) const
{
std::cout << "The context is " << ctx.id << std::endl;
return ::vitis_ai::execute(xmodel, output_shape, args);
}
};
MIGRAPHX_REGISTER_OP(fpga_vitis_op)
void lowering::apply(module& m) const
{
auto* mod = &m;
// test modifying the context from a pass
ctx->id = 2;
for(auto it : iterator_for(*mod))
{
if(it->name() == "fpga::vitis_placeholder")
{
assert(it->module_inputs().size() == 1);
auto xmodel = ::vitis_ai::create_xmodel(it->module_inputs()[0]);
mod->replace_instruction(it, fpga_vitis_op{xmodel}, it->inputs());
}
}
}
} // namespace fpga
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/fpga/subgraph.hpp>
#include <migraphx/instruction.hpp>
#include "migraphx/iterator.hpp"
#include <migraphx/iterator_for.hpp>
#include "migraphx/make_op.hpp"
#include "migraphx/module.hpp"
#include "migraphx/ranges.hpp"
#include <migraphx/register_op.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/pass_manager.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace fpga {
struct fpga_placeholder_op
{
fpga_placeholder_op() = default;
int dummy = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.dummy, "dummy"));
}
std::string name() const { return "fpga::vitis_placeholder"; }
shape compute_shape(const std::vector<shape>& inputs, std::vector<module_ref> mods) const
{
(void)inputs;
if(mods.size() != 1)
{
MIGRAPHX_THROW("should have one submodule.");
}
module_ref sm = mods.front();
if(sm->get_output_shapes().size() != 1)
MIGRAPHX_THROW("Only one return");
return sm->get_output_shapes().front();
}
};
MIGRAPHX_REGISTER_OP(fpga_placeholder_op)
bool is_fpga_instr(migraphx::instruction_ref it)
{
// assuming all instructions that aren't @param, @literal, or input data are fpga instrs
if(migraphx::starts_with(it->name(), "@"))
{
return false;
}
// no inputs to the instr means it's input data
if(it->inputs().empty())
{
return false;
}
return true;
}
void subgraph::apply(module_pass_manager& mpm) const
{
auto& mod = mpm.get_module();
auto* pm = mpm.create_module(mod.name() + ":fpga");
pm->set_bypass();
migraphx::instruction_ref first = mod.end();
migraphx::instruction_ref last;
std::vector<migraphx::instruction_ref> literal_inputs;
for(auto it : iterator_for(mod))
{
// assuming we want all the params/literals as inputs to the FPGA submodule
if(migraphx::starts_with(it->name(), "@param") ||
migraphx::starts_with(it->name(), "@literal"))
{
literal_inputs.push_back(it);
}
if(is_fpga_instr(it))
{
if(first == mod.end())
{
first = it;
}
last = it;
}
}
// TODO(varunsh): this code may be replaceable by code in the fuse_pointwise pass
// assuming all FPGA instructions are in one contiguous range
pm->insert_instructions(pm->end(), first, last, {});
migraphx::instruction_ref placeholder_ins;
for(auto it : iterator_for(mod))
{
if(migraphx::starts_with(it->name(), "@return"))
{
placeholder_ins = mod.insert_instruction(
it, migraphx::make_op("fpga::vitis_placeholder"), literal_inputs, {pm});
break;
}
}
mod.replace_return({placeholder_ins});
}
} // namespace fpga
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/fpga/target.hpp>
#include <migraphx/fpga/lowering.hpp>
#include <migraphx/fpga/subgraph.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/pass.hpp>
#include <migraphx/auto_contiguous.hpp>
#include <migraphx/rewrite_rnn.hpp>
#include <migraphx/eliminate_pad.hpp>
#include <migraphx/insert_pad.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/normalize_ops.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace fpga {
std::string target::name() const { return "fpga"; }
std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_options&) const
{
// not sure if all these passes are needed but they were copied from ref/
auto& ctx = any_cast<context>(gctx);
return {normalize_ops{},
eliminate_pad{},
dead_code_elimination{},
insert_pad{},
dead_code_elimination{},
rewrite_rnn{},
dead_code_elimination{},
auto_contiguous{},
dead_code_elimination{},
subgraph{},
dead_code_elimination{},
lowering{&ctx},
dead_code_elimination{}};
}
argument target::allocate(const shape& s) const { return fill_argument(s, 0); }
float is_supported(instruction_ref ins, support_metric m)
{
// for now, not using the ins and metric to return a value
(void)ins;
(void)m;
return 1.0;
}
MIGRAPHX_REGISTER_TARGET(target);
} // namespace fpga
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "migraphx/fpga/vitis_ai_adapter.hpp"
#include "migraphx/module.hpp"
#include "migraphx/stringutils.hpp"
namespace vitis_ai {
migraphx::shape x_model::get_shape() const { return shape; };
void x_model::set_shape(migraphx::shape s) { shape = s; }
x_model create_xmodel(const migraphx::module_ref mod)
{
std::cout << "Calling an external function: create_xmodel!\n";
x_model xmodel;
xmodel.set_shape(mod->get_output_shapes().front());
return xmodel;
}
migraphx::argument execute(const x_model& xmodel,
const migraphx::shape& output_shape,
std::vector<migraphx::argument>& args)
{
(void)xmodel;
std::cout << "Calling an external function: execute!\n";
std::cout << "Output Shape: " << output_shape << std::endl;
std::cout << "Args: " << args.size() << std::endl;
for(const auto& arg : args)
{
std::cout << " " << arg.get_shape() << std::endl;
}
std::cout << std::endl;
migraphx::argument result{output_shape};
return result;
}
} // namespace vitis_ai
@@ -164,6 +164,7 @@ add_library(migraphx_gpu
deconvolution.cpp
device_name.cpp
elu.cpp
fuse_mlir.cpp
fuse_ops.cpp
gather.cpp
gemm_impl.cpp
@@ -176,13 +177,14 @@ add_library(migraphx_gpu
loop.cpp
lrn.cpp
leaky_relu.cpp
mlir.cpp
multinomial.cpp
nonzero.cpp
pack_args.cpp
pack_int8_args.cpp
prefuse_ops.cpp
pad.cpp
perfdb.cpp
pooling.cpp
quant_convolution.cpp
reverse.cpp
@@ -320,16 +322,26 @@ message(STATUS "extractkernel: ${MIGRAPHX_EXTRACT_KERNEL}")
set(MIGRAPHX_ENABLE_MLIR OFF CACHE BOOL "")
if(MIGRAPHX_ENABLE_MLIR)
find_library(MLIRAPI_LIBRARY MLIRMIOpen
PATH_SUFFIXES
# Workaround broken mlir install
lib/ lib/lib)
# REQUIRED is not supported before cmake 3.18
if(NOT MLIRAPI_LIBRARY)
message(FATAL_ERROR "libMLIRMIOpen not found")
else()
message(STATUS "Build with libMLIRMIOpen: " ${MLIRAPI_LIBRARY})
endif()
find_path(MLIRAPI_HEADERS NAMES mlir-c/Dialect/MIGraphX.h)
# Workaround MLIR broken installation
find_path(MLIRAPI_HEADERS2 NAMES mlir-c/Registration.h
PATH_SUFFIXES
include/external/include external/include)
target_compile_definitions(migraphx_gpu PRIVATE "-DMIGRAPHX_MLIR")
target_include_directories(migraphx_gpu SYSTEM PRIVATE ${MLIRAPI_HEADERS} ${MLIRAPI_HEADERS2})
target_link_libraries(migraphx_gpu PUBLIC ${MLIRAPI_LIBRARY})
endif()
set(MIGRAPHX_USE_HIPRTC OFF CACHE BOOL "")
...
@@ -52,7 +52,7 @@ code_object_op::compute(context& ctx, const shape&, const std::vector<argument>&
std::transform(
args.begin(), args.end(), kargs.begin(), [](const argument& a) { return a.data(); });
k.launch(ctx.get_stream().get(), global, local, std::move(kargs));
return args[get_output_arg(args.size())];
}
void code_object_op::finalize(context&, const shape&, const std::vector<shape>&)
{
...
@@ -50,6 +50,9 @@ static std::vector<std::size_t> vector_sizes(const std::vector<shape>& inputs)
vectorize vectorize::elements(std::size_t axis, const std::vector<shape>& inputs)
{
if(std::all_of(
inputs.begin(), inputs.end(), [&](const auto& s) { return s.lens()[axis] == 1; }))
return {1, axis};
auto sizes = vector_sizes(inputs);
std::vector<std::size_t> max_vec_size;
std::transform(inputs.begin(),
@@ -81,11 +84,10 @@ preload preload::broadcasts(std::size_t axis, const std::vector<shape>& inputs)
const std::size_t max_lds_bytes = 4096;
std::vector<bool> result(inputs.size());
std::vector<std::size_t> preloaded;
auto idxs = range(inputs.size());
std::copy_if(idxs.begin(), idxs.end(), std::back_inserter(preloaded), [&](auto i) {
return inputs[i].strides()[axis] == 0;
});
std::sort(preloaded.begin(), preloaded.end(), by(std::less<>{}, [&](auto i) {
return inputs[i].bytes();
}));
...
@@ -51,9 +51,9 @@ static const char* const make_tensor_template = R"__migraphx__(
template<>
struct make_tensor<${n}>
{
static __device__ auto apply(void* __restrict__ p)
{
return make_tensor_view(reinterpret_cast<${type}* __restrict__>(p), make_shape(${lens}, ${strides}));
}
};
)__migraphx__";
...
@@ -59,31 +59,30 @@ argument miopen_deconvolution::compute(context& ctx,
auto w_desc = make_tensor(reshape_if_1d(args[1].get_shape()));
auto y_desc = make_tensor(reshape_if_1d(output_shape));
if(solution_id == 0)
MIGRAPHX_THROW("MIOpen Deconvolution: invalid solution ID");
auto status = miopenConvolutionForwardImmediate(ctx.get_stream().get_miopen(),
w_desc.get(),
args[1].implicit(),
x_desc.get(),
args[0].implicit(),
cd.get(),
y_desc.get(),
args[3].implicit(),
args[2].implicit(),
args[2].get_shape().bytes(),
solution_id);
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("MIOpen Deconvolution: running convolution failed");
return args[3];
}
shape miopen_deconvolution::find(context& ctx, const shape& output_shape, std::vector<shape> inputs)
{
shape workspace_shape{};
auto x_desc = make_tensor(reshape_if_1d(inputs[0]));
auto w_desc = make_tensor(reshape_if_1d(inputs[1]));
auto y_desc = make_tensor(reshape_if_1d(output_shape));
@@ -119,9 +118,35 @@ shape miopen_deconvolution::compile(context& ctx,
workspace_size,
false);
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("MIOpen Deconvolution: find convolution failed");
algo = perf.fwd_algo;
size_t solution_count;
status = miopenConvolutionForwardGetSolutionCount(ctx.get_stream().get_miopen(),
w_desc.get(),
x_desc.get(),
cd.get(),
y_desc.get(),
&solution_count);
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("MIOpen Deconvolution: get solution count failed");
std::vector<miopenConvSolution_t> solutions(solution_count);
status = miopenConvolutionForwardGetSolution(ctx.get_stream().get_miopen(),
w_desc.get(),
x_desc.get(),
cd.get(),
y_desc.get(),
solution_count,
&solution_count,
solutions.data());
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("MIOpen Deconvolution: get solution failed");
solution_id = solutions.front().solution_id;
return shape{shape::int8_type, {perf.memory}};
}
@@ -129,13 +154,29 @@ void miopen_deconvolution::finalize(context& ctx,
const shape& output_shape,
std::vector<shape> inputs)
{
if(cd == nullptr)
cd = make_deconv(op);
if(solution_id == 0)
{
// Check that workspace hasn't changed
auto size = inputs.at(2).bytes();
auto ws = find(ctx, output_shape, inputs);
if(ws.bytes() > size)
MIGRAPHX_THROW("MIOpen Deconvolution: workspace has changed during finalization.");
}
auto x_desc = make_tensor(reshape_if_1d(inputs[0]));
auto w_desc = make_tensor(reshape_if_1d(inputs[1]));
auto y_desc = make_tensor(reshape_if_1d(output_shape));
auto status = miopenConvolutionForwardCompileSolution(ctx.get_stream().get_miopen(),
w_desc.get(),
x_desc.get(),
cd.get(),
y_desc.get(),
solution_id);
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("MIOpen Deconvolution: compile solution failed");
}
} // namespace gpu
...