Commit f9437603 authored by Khalique Ahmed
Browse files

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into mi100_opts

parents 781ce146 658cdab0
#ifndef MIGRAPHX_GUARD_MIGRAPHX_PROCESS_HPP
#define MIGRAPHX_GUARD_MIGRAPHX_PROCESS_HPP
#include <migraphx/config.hpp>
#include <migraphx/filesystem.hpp>
#include <string>
#include <memory>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
// Opaque implementation type (pimpl) defined in the .cpp file.
struct process_impl;
// Handle for launching an external command. Configure with cwd(), then run
// with exec(). Move-only in practice: the unique_ptr member plus the declared
// move constructor implicitly delete the copy constructor.
struct process
{
// Construct with the command line to run; the command is not executed
// until exec() is called.
process(const std::string& cmd);
// move constructor
process(process&&) noexcept;
// copy assignment operator
// NOTE(review): by-value parameter suggests the copy-and-swap idiom, but with
// the copy constructor implicitly deleted only rvalues can be assigned —
// confirm intent in the implementation file.
process& operator=(process rhs);
~process() noexcept;
// Set the working directory for the child process; returns *this so calls
// can be chained before exec().
process& cwd(const fs::path& p);
// Run the configured command. Error-reporting behavior (e.g. on non-zero
// exit status) is defined in the implementation file — TODO confirm.
void exec();
private:
std::unique_ptr<process_impl> impl;
};
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif // MIGRAPHX_GUARD_MIGRAPHX_PROCESS_HPP
......@@ -5,6 +5,7 @@
#include <migraphx/tensor_view.hpp>
#include <migraphx/requires.hpp>
#include <migraphx/config.hpp>
#include <sstream>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
......@@ -146,6 +147,13 @@ struct raw_data : raw_data_base
migraphx::shape::get_type<T>{});
return reinterpret_cast<T*>(buffer);
}
// Render this object as a string by forwarding to the derived type's
// stream-insertion operator.
std::string to_string() const
{
    const auto& self = static_cast<const Derived&>(*this);
    std::stringstream stream;
    stream << self;
    return stream.str();
}
};
template <class T,
......
......@@ -102,6 +102,29 @@ void reflect_each(T& x, F f)
});
}
// Mixin that derives == and != for T from its reflected members
// (compared via reflect_tie).
template <class T>
struct reflect_equality
{
    friend bool operator==(const T& a, const T& b) { return reflect_tie(a) == reflect_tie(b); }
    friend bool operator!=(const T& a, const T& b) { return not(a == b); }
};
// Mixin that prints T's reflected members as "{name1=value1,name2=value2}".
// Note: a type with no reflected members prints just "}" (the opening brace
// doubles as the first separator), matching the established output format.
template <class T>
struct reflect_stream
{
    template <class Stream>
    friend Stream& operator<<(Stream& os, const T& x)
    {
        char sep = '{';
        reflect_each(x, [&](const auto& member, const auto& name) {
            os << sep << name << "=" << member;
            sep = ',';
        });
        os << "}";
        return os;
    }
};
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......
File mode changed from 100644 to 100755
......@@ -61,6 +61,9 @@ struct shape
static const std::vector<type_t>& types();
static std::string name(type_t t);
static std::string cpp_type(type_t t);
shape();
shape(type_t t);
shape(type_t t, std::vector<std::size_t> l);
......
......@@ -15,6 +15,12 @@ inline namespace MIGRAPHX_INLINE_NS {
#define MIGRAPHX_STRINGIZE_1(...) #__VA_ARGS__
#define MIGRAPHX_STRINGIZE(...) MIGRAPHX_STRINGIZE_1(__VA_ARGS__)
// Wrap a callable so it receives its argument as unsigned char. Passing a
// plain (possibly negative) char to <cctype> functions is undefined behavior;
// routing calls through this adapter makes the conversion explicit.
template <class F>
auto with_char(F f)
{
    return [f](unsigned char c) { return f(c); };
}
inline std::string
replace_string(std::string subject, const std::string& search, const std::string& replace)
{
......@@ -70,7 +76,7 @@ std::string trim(const std::string& s, F f)
// Trim leading and trailing whitespace from s.
// The predicate takes unsigned char: calling std::isspace with a negative
// char value is undefined behavior, so chars must be converted first.
// Fix: the block contained two return statements (a stale copy of the old
// int-parameter lambda left above the corrected one, making the second
// unreachable); only the unsigned char version is kept.
inline std::string trim(const std::string& s)
{
    return trim(s, [](unsigned char c) { return std::isspace(c); });
}
template <class F>
......@@ -92,6 +98,14 @@ inline bool starts_with(const std::string& value, const std::string& prefix)
return std::equal(prefix.begin(), prefix.end(), value.begin());
}
// Return s with the given prefix stripped, or s unchanged when it does not
// start with that prefix.
inline std::string remove_prefix(std::string s, const std::string& prefix)
{
    return starts_with(s, prefix) ? s.substr(prefix.length()) : s;
}
template <class F>
inline std::string
interpolate_string(const std::string& input, F f, std::string start = "${", std::string end = "}")
......@@ -124,14 +138,6 @@ inline std::string interpolate_string(const std::string& input,
});
}
// NOTE(review): this is an exact duplicate of the remove_prefix defined
// earlier in this header — it looks like a stale copy left behind by the
// merge (the function was moved above interpolate_string). A second
// definition of the same inline function body is redundant; confirm and
// delete one of the two copies.
inline std::string remove_prefix(std::string s, const std::string& prefix)
{
if(starts_with(s, prefix))
return s.substr(prefix.length());
else
return s;
}
template <class Iterator>
inline std::string to_string_range(Iterator start, Iterator last)
{
......
......@@ -10,7 +10,7 @@ inline namespace MIGRAPHX_INLINE_NS {
struct tmp_dir
{
fs::path path;
tmp_dir();
tmp_dir(const std::string& prefix = "");
void execute(const std::string& exe, const std::string& args) const;
......
......@@ -7,6 +7,7 @@
#include <migraphx/type_name.hpp>
#include <migraphx/rank.hpp>
#include <algorithm>
#include <cassert>
#include <memory>
#include <sstream>
#include <type_traits>
......@@ -58,8 +59,6 @@ struct value_converter<std::string>
{
static const std::string& apply(const std::string& x) { return x; }
static std::string apply(const std::nullptr_t&) { return "null"; }
template <class From>
static auto apply(const From& x)
-> decltype(std::declval<std::stringstream&>() << x, std::string())
......@@ -83,20 +82,28 @@ struct value_converter<std::pair<T, U>>
}
};
template <class To, class From>
To try_convert_value(const From& x);
namespace detail {
template <class To, class Key, class From>
auto try_convert_value_impl(rank<2>, const std::pair<Key, From>& x)
-> decltype(value_converter<To>::apply(x.second))
To try_convert_value_impl(rank<1>, const std::pair<Key, From>& x)
{
return value_converter<To>::apply(x.second);
return try_convert_value<To>(x.second);
}
template <class To, class From>
auto try_convert_value_impl(rank<1>, const From& x) -> decltype(value_converter<To>::apply(x))
auto try_convert_value_impl(rank<2>, const From& x) -> decltype(value_converter<To>::apply(x))
{
return value_converter<To>::apply(x);
}
template <class To, MIGRAPHX_REQUIRES(not std::is_same<To, std::nullptr_t>{})>
To try_convert_value_impl(rank<3>, std::nullptr_t)
{
MIGRAPHX_THROW("Incompatible values: null -> " + get_type_name<To>());
}
template <class To, class From>
To try_convert_value_impl(rank<0>, const From& x)
{
......@@ -107,7 +114,7 @@ To try_convert_value_impl(rank<0>, const From& x)
template <class To, class From>
To try_convert_value(const From& x)
{
return detail::try_convert_value_impl<To>(rank<2>{}, x);
return detail::try_convert_value_impl<To>(rank<3>{}, x);
}
struct value
......@@ -309,7 +316,11 @@ struct value
{
case null_type:
{
v(std::nullptr_t{});
std::nullptr_t null{};
if(this->key.empty())
v(null);
else
v(std::make_pair(this->get_key(), std::ref(null)));
return;
}
#define MIGRAPHX_VALUE_GENERATE_CASE(vt, cpp_type) \
......@@ -328,6 +339,31 @@ struct value
MIGRAPHX_THROW("Unknown type");
}
// Visit the stored value directly, without wrapping it in a (key, value)
// pair — unlike visit(), which forwards the key when one is present.
template <class Visitor>
void visit_value(Visitor v) const
{
    switch(this->get_type())
    {
    case null_type:
    {
        // Materialize an lvalue nullptr so visitors taking references compile.
        std::nullptr_t null{};
        v(null);
        return;
    }
// NOTE(review): MIGRAPHX_VALUE_GENERATE_CASE_VALUE is defined here but the
// invocations below expand MIGRAPHX_VALUE_GENERATE_CASE (the earlier variant)
// instead, and this macro is never #undef'd afterwards. Confirm whether the
// _VALUE variant was meant to be invoked or whether this definition is dead.
#define MIGRAPHX_VALUE_GENERATE_CASE_VALUE(vt, cpp_type) \
    case vt##_type:                                      \
    {                                                    \
        v(this->get_##vt());                             \
        return;                                          \
    }
        MIGRAPHX_VISIT_VALUE_TYPES(MIGRAPHX_VALUE_GENERATE_CASE)
        MIGRAPHX_VALUE_GENERATE_CASE(array, )
        MIGRAPHX_VALUE_GENERATE_CASE(object, )
    }
    MIGRAPHX_THROW("Unknown type");
}
template <class To>
To to() const
{
......@@ -336,6 +372,14 @@ struct value
return result;
}
// Convert this value to To, falling back to default_value when the value
// holds null.
template <class To>
To value_or(const To& default_value) const
{
    return this->is_null() ? default_value : this->to<To>();
}
template <class To>
std::vector<To> to_vector() const
{
......
......@@ -147,7 +147,7 @@ std::size_t mismatch_diff(R1&& r1, R2&& r2, T diff)
}
template <class R1, class R2>
double rms_range(R1&& r1, R2&& r2)
double rms_range(const R1& r1, const R2& r2)
{
std::size_t n = range_distance(r1);
if(n == range_distance(r2))
......@@ -164,7 +164,7 @@ double rms_range(R1&& r1, R2&& r2)
}
template <class R1, class R2>
bool verify_range(R1&& r1, R2&& r2, double tolerance = 80, double* out_error = nullptr)
bool verify_range(const R1& r1, const R2& r2, double tolerance = 80, double* out_error = nullptr)
{
double threshold = std::numeric_limits<range_value<R1>>::epsilon() * tolerance;
auto error = rms_range(r1, r2);
......
......@@ -443,5 +443,19 @@ shape compute_shape(const operation& op,
return op.compute_shape(to_shapes(args), mods);
}
}
// Best-effort shape computation: returns a single-element vector holding the
// computed shape, or an empty vector when op.compute_shape rejects the inputs
// (any exception is treated as "no shape").
std::vector<shape> try_compute_shape(const operation& op, const std::vector<shape>& inputs)
{
    try
    {
        return {op.compute_shape(inputs)};
    }
    catch(...)
    {
        return {};
    }
}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -396,6 +396,40 @@ instruction_ref module::validate() const
});
}
// Walk the output-alias chain of ins and report whether any step borrows
// memory from its input. The chain terminates when an instruction aliases
// itself (iterative form of the original tail recursion).
bool is_borrowed(instruction_ref ins)
{
    auto current = ins;
    for(;;)
    {
        auto alias = instruction::get_output_alias(current, true);
        if(alias == current)
            return false;
        if(alias->get_operator().is_borrowed())
            return true;
        current = alias;
    }
}
// True when the instruction's final output alias is a parameter.
bool is_param_alias(instruction_ref ins)
{
    auto alias = instruction::get_output_alias(ins);
    return alias->name() == "@param";
}
// A reference dangles when it borrows memory that is not backed by a
// parameter (aliases of parameters are checked first, preserving the
// original short-circuit order).
bool is_dangling(instruction_ref ins)
{
    if(is_param_alias(ins))
        return false;
    return is_borrowed(ins);
}
// Locate a dangling reference in this module: if the module ends in @return,
// scan its inputs for one; otherwise check the last instruction itself.
// Returns end() when no dangling reference exists.
instruction_ref module::find_dangling_reference() const
{
    auto last = std::prev(end());
    if(last->name() == "@return")
    {
        const auto& inputs = last->inputs();
        auto it =
            std::find_if(inputs.begin(), inputs.end(), [](auto ins) { return is_dangling(ins); });
        if(it != inputs.end())
            return *it;
        return end();
    }
    if(is_dangling(last))
        return last;
    return end();
}
void module::finalize(context& ctx)
{
for(auto ins : iterator_for(*this))
......
......@@ -135,7 +135,7 @@ MSGPACK_API_VERSION_NAMESPACE(MSGPACK_DEFAULT_API_NS)
template <class Stream>
packer<Stream>& operator()(msgpack::packer<Stream>& o, const migraphx::value& v) const
{
v.visit([&](auto&& x) { this->write(o, x); });
v.visit_value([&](auto&& x) { this->write(o, x); });
return o;
}
};
......
......@@ -63,6 +63,7 @@ struct onnx_parser
std::size_t default_dim_value = 1;
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims;
bool skip_unknown_operators = false;
int64_t opset_version = 13;
std::unordered_map<std::string, op_func> ops;
......@@ -71,6 +72,8 @@ struct onnx_parser
void parse_undefined(module* mod, const std::string& name);
static int64_t get_opset_version(const onnx::ModelProto& model);
void parse_from(std::istream& is, std::string name = "");
void parse_from(const void* data, std::size_t size);
void parse_graph(module* mod, const onnx::GraphProto& graph);
......
......@@ -29,6 +29,14 @@ static onnx_parser::attribute_map get_attributes(const onnx::NodeProto& node)
static literal
create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, const char* data)
{
// empty input
auto elem_num =
std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>());
if(elem_num == 0)
{
return {};
}
// in case of scalar constants in onnx file, use dims=1 to fill initializer data
if(dims.empty())
return literal{{shape_type}, data};
......@@ -38,6 +46,15 @@ create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, const
template <class T, MIGRAPHX_REQUIRES(not std::is_pointer<T>{})>
static literal create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, T data)
{
// empty input
auto elem_num =
std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>());
if(elem_num == 0)
{
return {};
}
// scalar input
if(dims.empty())
return literal{{shape_type}, data.begin(), data.end()};
return literal{{shape_type, dims}, data.begin(), data.end()};
......@@ -210,6 +227,9 @@ void onnx_parser::parse_from(std::istream& is, std::string name)
onnx::ModelProto model;
if(model.ParseFromIstream(&is))
{
auto version = get_opset_version(model);
opset_version = (version == -1) ? opset_version : version;
if(model.has_graph())
{
this->parse_graph(mm, model.graph());
......@@ -227,6 +247,9 @@ void onnx_parser::parse_from(const void* data, std::size_t size)
onnx::ModelProto model;
if(model.ParseFromArray(data, size))
{
auto version = get_opset_version(model);
opset_version = (version == -1) ? opset_version : version;
if(model.has_graph())
{
this->parse_graph(mm, model.graph());
......@@ -238,6 +261,21 @@ void onnx_parser::parse_from(const void* data, std::size_t size)
}
}
// Return the highest opset version declared in the model's opset_import
// entries, or -1 when no entry carries a version (caller keeps its default).
int64_t onnx_parser::get_opset_version(const onnx::ModelProto& model)
{
    int64_t result = -1;
    for(const auto& opset : model.opset_import())
    {
        if(not opset.has_version())
            continue;
        if(opset.version() > result)
            result = opset.version();
    }
    return result;
}
void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
{
for(auto&& f : graph.initializer())
......
......@@ -30,7 +30,6 @@ struct parse_generic_op : op_parser<parse_generic_op>
{"Identity", "identity"},
{"LeakyRelu", "leaky_relu"},
{"Log", "log"},
{"LogSoftmax", "logsoftmax"},
{"LRN", "lrn"},
{"Neg", "neg"},
{"Reciprocal", "recip"},
......@@ -40,18 +39,15 @@ struct parse_generic_op : op_parser<parse_generic_op>
{"Sign", "sign"},
{"Sin", "sin"},
{"Sinh", "sinh"},
{"Softmax", "softmax"},
{"Sqrt", "sqrt"},
{"Squeeze", "squeeze"},
{"Tan", "tan"},
{"Tanh", "tanh"},
{"Not", "not"},
{"Unsqueeze", "unsqueeze"}};
{"Not", "not"}};
}
bool needs_contiguous(const std::string& op_name) const
{
return contains({"gather", "squeeze", "unsqueeze"}, op_name);
return contains({"gather"}, op_name);
}
instruction_ref parse(const op_desc& opd,
......
......@@ -16,8 +16,13 @@ struct parse_lessorequal : op_parser<parse_lessorequal>
const onnx_parser::node_info& info,
std::vector<instruction_ref> args) const
{
auto int_res = info.add_instruction(make_op("greater"), args[0], args[1]);
return info.add_instruction(make_op("not"), int_res);
auto in_res = info.add_broadcastable_binary_op("greater", args[0], args[1]);
if(in_res->get_shape().type() != shape::bool_type)
{
in_res = info.add_instruction(make_op("convert", {{"target_type", shape::bool_type}}),
in_res);
}
return info.add_instruction(make_op("not"), in_res);
}
};
......
......@@ -72,6 +72,21 @@ struct parse_quantizelinear : op_parser<parse_quantizelinear>
make_op("convert", {{"target_type", shape::int32_type}}), add_zero_point);
add_zero_point = info.add_broadcastable_binary_op("add", add_zero_point, zero_point);
}
auto s = add_zero_point->get_shape();
const auto& lens = s.lens();
std::vector<int64_t> out_lens(lens.begin(), lens.end());
if(min_arg->get_shape() != s)
{
min_arg = info.add_instruction(make_op("multibroadcast", {{"output_lens", out_lens}}),
min_arg);
}
if(max_arg->get_shape() != s)
{
max_arg = info.add_instruction(make_op("multibroadcast", {{"output_lens", out_lens}}),
max_arg);
}
auto saturated = info.add_instruction(make_op("clip"), add_zero_point, min_arg, max_arg);
return info.add_instruction(make_op("convert", {{"target_type", quant_type}}), saturated);
}
......
......@@ -2,6 +2,7 @@
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
......@@ -12,18 +13,44 @@ instruction_ref parse_reduce_oper(const std::string& op_name,
onnx_parser::node_info info,
std::vector<instruction_ref> args)
{
std::size_t n_dim = args.front()->get_shape().lens().size();
// default to reduce over all dimensions
std::vector<int64_t> axes(n_dim);
std::iota(axes.begin(), axes.end(), 0);
if(contains(info.attributes, "axes"))
std::vector<int64_t> axes;
if(args.size() == 2)
{
auto arg_axes = args.at(1)->eval();
check_arg_empty(arg_axes, "PARSE_" + op_name + ": cannot handle variable axes!");
axes.clear();
arg_axes.visit([&](auto s) { axes.assign(s.begin(), s.end()); });
}
else if(contains(info.attributes, "axes"))
{
axes.clear();
auto&& attr_axes = info.attributes["axes"].ints();
axes = std::vector<int64_t>(attr_axes.begin(), attr_axes.end());
}
bool noop_with_empty_axes = false;
if(contains(info.attributes, "noop_with_empty_axes"))
{
noop_with_empty_axes = static_cast<bool>(
parser.parse_value(info.attributes.at("noop_with_empty_axes")).at<int>());
}
// empty axes behavior
if(axes.empty())
{
if(noop_with_empty_axes)
{
return args.at(0);
}
else
{
std::size_t n_dim = args.front()->get_shape().lens().size();
axes.resize(n_dim);
std::iota(axes.begin(), axes.end(), 0);
}
}
int keep_dims = 1;
if(contains(info.attributes, "keepdims"))
{
......@@ -32,11 +59,11 @@ instruction_ref parse_reduce_oper(const std::string& op_name,
if(keep_dims == 1)
{
return info.add_instruction(make_op(op_name, {{"axes", axes}}), args);
return info.add_instruction(make_op(op_name, {{"axes", axes}}), args.front());
}
else
{
auto ins = info.add_instruction(make_op(op_name, {{"axes", axes}}), args);
auto ins = info.add_instruction(make_op(op_name, {{"axes", axes}}), args.front());
return info.add_instruction(make_op("squeeze", {{"axes", axes}}), ins);
}
}
......
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/instruction.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
// Parser for the ONNX Softmax and LogSoftmax operators.
struct parse_softmax : op_parser<parse_softmax>
{
    std::vector<op_desc> operators() const
    {
        return {{"Softmax", "softmax"}, {"LogSoftmax", "logsoftmax"}};
    }

    instruction_ref parse(const op_desc& opd,
                          const onnx_parser& parser,
                          const onnx_parser::node_info& info,
                          const std::vector<instruction_ref>& args) const
    {
        // ONNX opset 13 changed the default axis from 1 to -1.
        int64_t axis = parser.opset_version < 13 ? 1 : -1;
        // An explicit axis attribute always overrides the opset default.
        if(contains(info.attributes, "axis"))
        {
            axis = parser.parse_value(info.attributes.at("axis")).at<int>();
        }
        return info.add_instruction(make_op(opd.op_name, {{"axis", axis}}), args);
    }
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/instruction.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
// Parser for the ONNX Squeeze and Unsqueeze operators.
struct parse_squeeze : op_parser<parse_squeeze>
{
    std::vector<op_desc> operators() const
    {
        return {{"Squeeze", "squeeze"}, {"Unsqueeze", "unsqueeze"}};
    }

    // Write the explicit axes into the operation's serialized value form and
    // rebuild the operation from it.
    operation assign_axes(operation& op, const std::vector<int64_t>& axes) const
    {
        auto serialized    = op.to_value();
        serialized["axes"] = axes;
        op.from_value(serialized);
        return op;
    }

    instruction_ref parse(const op_desc& opd,
                          const onnx_parser& parser,
                          const onnx_parser::node_info& info,
                          std::vector<instruction_ref> args) const
    {
        auto op = parser.load(opd.op_name, info);
        // Opset 13 moved axes from an attribute to a second input; it must be
        // a constant that can be evaluated at parse time.
        if(args.size() == 2)
        {
            auto arg_axes = args.at(1)->eval();
            check_arg_empty(arg_axes, "PARSE_" + opd.op_name + ": cannot handle variable axes!");
            std::vector<int64_t> axes;
            arg_axes.visit([&](auto s) { axes.assign(s.begin(), s.end()); });
            op = assign_axes(op, axes);
        }
        return info.add_instruction(op, info.make_contiguous(args.front()));
    }
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment