Commit 2fc6b715 authored by Paul's avatar Paul
Browse files

Merge

parents 5967d68d 118e05c7
...@@ -148,13 +148,21 @@ struct value_parser ...@@ -148,13 +148,21 @@ struct value_parser
template <MIGRAPHX_REQUIRES(not std::is_enum<T>{} and not is_multi_value<T>{})> template <MIGRAPHX_REQUIRES(not std::is_enum<T>{} and not is_multi_value<T>{})>
static T apply(const std::string& x) static T apply(const std::string& x)
{ {
T result; // handle whitespace in string
std::stringstream ss; if constexpr(std::is_same<T, std::string>{})
ss.str(x); {
ss >> result; return x;
if(ss.fail()) }
throw std::runtime_error("Failed to parse '" + x + "' as " + type_name<T>::apply()); else
return result; {
T result;
std::stringstream ss;
ss.str(x);
ss >> result;
if(ss.fail())
throw std::runtime_error("Failed to parse '" + x + "' as " + type_name<T>::apply());
return result;
}
} }
template <MIGRAPHX_REQUIRES(std::is_enum<T>{} and not is_multi_value<T>{})> template <MIGRAPHX_REQUIRES(std::is_enum<T>{} and not is_multi_value<T>{})>
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE. * THE SOFTWARE.
*/ */
#include "verify.hpp" #include "verify.hpp"
#include "argument_parser.hpp" #include "argument_parser.hpp"
#include "command.hpp" #include "command.hpp"
...@@ -32,6 +33,7 @@ ...@@ -32,6 +33,7 @@
#include <migraphx/tf.hpp> #include <migraphx/tf.hpp>
#include <migraphx/onnx.hpp> #include <migraphx/onnx.hpp>
#include <migraphx/stringutils.hpp> #include <migraphx/stringutils.hpp>
#include <migraphx/convert_to_json.hpp>
#include <migraphx/load_save.hpp> #include <migraphx/load_save.hpp>
#include <migraphx/json.hpp> #include <migraphx/json.hpp>
#include <migraphx/version.h> #include <migraphx/version.h>
...@@ -67,7 +69,9 @@ struct loader ...@@ -67,7 +69,9 @@ struct loader
bool brief = false; bool brief = false;
std::string output_type; std::string output_type;
std::string output; std::string output;
std::string default_dyn_dim;
std::vector<std::string> param_dims; std::vector<std::string> param_dims;
std::vector<std::string> dyn_param_dims;
std::vector<std::string> output_names; std::vector<std::string> output_names;
void parse(argument_parser& ap) void parse(argument_parser& ap)
...@@ -82,7 +86,11 @@ struct loader ...@@ -82,7 +86,11 @@ struct loader
ap(file_type, {"--tf"}, ap.help("Load as tensorflow"), ap.set_value("tf")); ap(file_type, {"--tf"}, ap.help("Load as tensorflow"), ap.set_value("tf"));
ap(file_type, {"--migraphx"}, ap.help("Load as MIGraphX"), ap.set_value("migraphx")); ap(file_type, {"--migraphx"}, ap.help("Load as MIGraphX"), ap.set_value("migraphx"));
ap(file_type, {"--migraphx-json"}, ap.help("Load as MIGraphX JSON"), ap.set_value("json")); ap(file_type, {"--migraphx-json"}, ap.help("Load as MIGraphX JSON"), ap.set_value("json"));
ap(batch, {"--batch"}, ap.help("Set batch size for model")); ap(batch,
{"--batch"},
ap.help("For a static model, sets default_dim_value size (commonly batch size). For a "
"dynamic batch model, sets the batch "
"size at runtime."));
ap(is_nhwc, {"--nhwc"}, ap.help("Treat tensorflow format as nhwc"), ap.set_value(true)); ap(is_nhwc, {"--nhwc"}, ap.help("Treat tensorflow format as nhwc"), ap.set_value(true));
ap(skip_unknown_operators, ap(skip_unknown_operators,
{"--skip-unknown-operators"}, {"--skip-unknown-operators"},
...@@ -95,7 +103,16 @@ struct loader ...@@ -95,7 +103,16 @@ struct loader
ap.help("Dim of a parameter (format: \"@name d1 d2 dn\")"), ap.help("Dim of a parameter (format: \"@name d1 d2 dn\")"),
ap.append(), ap.append(),
ap.nargs(2)); ap.nargs(2));
ap(dyn_param_dims,
{"--dyn-input-dim"},
ap.help("Dynamic dimensions of a parameter (format: \"@name_1\" \"[{min:x, max:y, "
"optimals:[o1,o2,...]}, dim2,dim3, ...]\", \"@name_2\", ... You can supply a "
"single integer value for a dimension to specify it as fixed."),
ap.append(),
ap.nargs(2));
ap(default_dyn_dim,
{"--default-dyn-dim"},
ap.help("Default dynamic dimension (format: \"{min:x, max:y, optimals:[o1,o2]}\")."));
ap(output_names, ap(output_names,
{"--output-names"}, {"--output-names"},
ap.help("Names of node output (format: \"name_1 name_2 name_n\")"), ap.help("Names of node output (format: \"name_1 name_2 name_n\")"),
...@@ -146,6 +163,40 @@ struct loader ...@@ -146,6 +163,40 @@ struct loader
return map_input_dims; return map_input_dims;
} }
static auto parse_dyn_dims_json(const std::string& dd_json)
{
// expecting a json string like "[{min:1,max:64,optimals:[1,2,4,8]},3,224,224]"
auto v = from_json_string(convert_to_json(dd_json));
std::vector<migraphx::shape::dynamic_dimension> dyn_dims;
std::transform(v.begin(), v.end(), std::back_inserter(dyn_dims), [&](auto x) {
if(x.is_object())
return from_value<migraphx::shape::dynamic_dimension>(x);
auto d = x.template to<std::size_t>();
return migraphx::shape::dynamic_dimension{d, d};
});
return dyn_dims;
}
// Build a map from parameter name to its dynamic dimensions.
// Expects a vector of strings formatted like
// {"@param_name_0", "dd_json_0", "@param_name_1", "dd_json_1", ...}
// where each "@name" entry selects the parameter that the following JSON
// strings apply to.
static auto parse_dyn_dims_map(const std::vector<std::string>& param_dyn_dims)
{
    std::unordered_map<std::string, std::vector<shape::dynamic_dimension>> map_dyn_input_dims;
    std::string name = "";
    for(auto&& x : param_dyn_dims)
    {
        // guard: x[0] on an empty string is undefined behavior
        if(x.empty())
            continue;
        if(x.front() == '@')
        {
            name = x.substr(1);
        }
        else
        {
            map_dyn_input_dims[name] = parse_dyn_dims_json(x);
        }
    }
    return map_dyn_input_dims;
}
static auto parse_output_names(const std::vector<std::string>& output_names_info) static auto parse_output_names(const std::vector<std::string>& output_names_info)
{ {
std::vector<std::string> output_node_names; std::vector<std::string> output_node_names;
...@@ -157,13 +208,44 @@ struct loader ...@@ -157,13 +208,44 @@ struct loader
return output_node_names; return output_node_names;
} }
// Assemble the tensorflow parser options from the command-line state.
tf_options get_tf_options() const
{
    tf_options options;
    options.is_nhwc           = is_nhwc;
    options.batch_size        = batch;
    options.map_input_dims    = parse_param_dims(param_dims);
    options.output_node_names = parse_output_names(output_names);
    return options;
}
// Assemble the onnx parser options from the command-line state.
// When --default-dyn-dim was given it takes precedence over --batch for
// defaulting unspecified dimensions.
onnx_options get_onnx_options() const
{
    onnx_options options;
    options.map_input_dims         = parse_param_dims(param_dims);
    options.map_dyn_input_dims     = parse_dyn_dims_map(dyn_param_dims);
    options.skip_unknown_operators = skip_unknown_operators;
    options.print_program_on_error = true;
    if(default_dyn_dim.empty())
    {
        options.default_dim_value = batch;
    }
    else
    {
        auto parsed = from_json_string(convert_to_json(default_dyn_dim));
        options.default_dyn_dim_value =
            from_value<migraphx::shape::dynamic_dimension>(parsed);
    }
    return options;
}
program load() program load()
{ {
program p; program p;
if(model.empty()) if(model.empty())
{ {
auto map_input_dims = parse_param_dims(param_dims);
auto output_node_names = parse_output_names(output_names);
if(file_type.empty()) if(file_type.empty())
{ {
if(ends_with(file, ".onnx")) if(ends_with(file, ".onnx"))
...@@ -178,16 +260,11 @@ struct loader ...@@ -178,16 +260,11 @@ struct loader
std::cout << "Reading: " << file << std::endl; std::cout << "Reading: " << file << std::endl;
if(file_type == "onnx") if(file_type == "onnx")
{ {
onnx_options options; p = parse_onnx(file, get_onnx_options());
options.default_dim_value = batch;
options.skip_unknown_operators = skip_unknown_operators;
options.print_program_on_error = true;
options.map_input_dims = map_input_dims;
p = parse_onnx(file, options);
} }
else if(file_type == "tf") else if(file_type == "tf")
{ {
p = parse_tf(file, tf_options{is_nhwc, batch, map_input_dims, output_node_names}); p = parse_tf(file, get_tf_options());
} }
else if(file_type == "json") else if(file_type == "json")
{ {
...@@ -288,14 +365,21 @@ struct program_params ...@@ -288,14 +365,21 @@ struct program_params
ap(fill1, {"--fill1"}, ap.help("Fill parameter with 1s"), ap.append(), ap.nargs(2)); ap(fill1, {"--fill1"}, ap.help("Fill parameter with 1s"), ap.append(), ap.nargs(2));
} }
auto generate(const program& p, const target& t, bool offload) auto generate(const program& p, const target& t, bool offload, unsigned batch)
{ {
parameter_map m; parameter_map m;
auto param_shapes = p.get_parameter_shapes();
std::unordered_map<std::string, shape> static_param_shapes;
std::transform(
param_shapes.cbegin(),
param_shapes.cend(),
std::inserter(static_param_shapes, static_param_shapes.end()),
[&](const auto& x) { return std::make_pair(x.first, x.second.to_static(batch)); });
for(auto&& s : fill0) for(auto&& s : fill0)
m[s] = fill_argument(p.get_parameter_shape(s), 0); m[s] = fill_argument(static_param_shapes.at(s), 0);
for(auto&& s : fill1) for(auto&& s : fill1)
m[s] = fill_argument(p.get_parameter_shape(s), 1); m[s] = fill_argument(static_param_shapes.at(s), 1);
fill_param_map(m, p, t, offload); fill_param_map(m, static_param_shapes, t, offload);
return m; return m;
} }
}; };
...@@ -304,8 +388,12 @@ struct compiler_target ...@@ -304,8 +388,12 @@ struct compiler_target
{ {
#ifdef HAVE_GPU #ifdef HAVE_GPU
std::string target_name = "gpu"; std::string target_name = "gpu";
#else #elif HAVE_CPU
std::string target_name = "cpu"; std::string target_name = "cpu";
#elif HAVE_FPGA
std::string target_name = "fpga"
#else
std::string target_name = "ref"
#endif #endif
void parse(argument_parser& ap) void parse(argument_parser& ap)
...@@ -348,13 +436,18 @@ struct compiler ...@@ -348,13 +436,18 @@ struct compiler
{"--exhaustive-tune"}, {"--exhaustive-tune"},
ap.help("Exhastively search for best tuning parameters for kernels"), ap.help("Exhastively search for best tuning parameters for kernels"),
ap.set_value(true)); ap.set_value(true));
ap(co.split_single_dyn_dim,
{"--split-single-dyn-dim"},
ap.help("If there is a single non-fixed dynamic dimension in the model, then split to "
"static submodules"),
ap.set_value(true));
ap(quantize, {"--fp16"}, ap.help("Quantize for fp16"), ap.set_value(precision::fp16)); ap(quantize, {"--fp16"}, ap.help("Quantize for fp16"), ap.set_value(precision::fp16));
ap(quantize, {"--int8"}, ap.help("Quantize for int8"), ap.set_value(precision::int8)); ap(quantize, {"--int8"}, ap.help("Quantize for int8"), ap.set_value(precision::int8));
} }
auto params(const program& p) auto params(const program& p)
{ {
return parameters.generate(p, ct.get_target(), co.offload_copy); return parameters.generate(p, ct.get_target(), co.offload_copy, l.batch);
} }
program compile() program compile()
...@@ -427,7 +520,7 @@ struct verify : command<verify> ...@@ -427,7 +520,7 @@ struct verify : command<verify>
std::cout << p << std::endl; std::cout << p << std::endl;
auto t = c.ct.get_target(); auto t = c.ct.get_target();
auto m = c.parameters.generate(p, t, true); auto m = c.parameters.generate(p, t, true, c.l.batch);
if(per_instruction) if(per_instruction)
{ {
...@@ -450,7 +543,8 @@ struct version : command<version> ...@@ -450,7 +543,8 @@ struct version : command<version>
void run() const void run() const
{ {
std::cout << "MIGraphX Version: " << MIGRAPHX_VERSION_MAJOR << "." << MIGRAPHX_VERSION_MINOR std::cout << "MIGraphX Version: " << MIGRAPHX_VERSION_MAJOR << "." << MIGRAPHX_VERSION_MINOR
<< std::endl; << "." << MIGRAPHX_VERSION_PATCH << "."
<< MIGRAPHX_STRINGIZE(MIGRAPHX_VERSION_TWEAK) << std::endl;
} }
}; };
...@@ -587,7 +681,9 @@ struct main_command ...@@ -587,7 +681,9 @@ struct main_command
void parse(argument_parser& ap) void parse(argument_parser& ap)
{ {
std::string version_str = "MIGraphX Version: " + std::to_string(MIGRAPHX_VERSION_MAJOR) + std::string version_str = "MIGraphX Version: " + std::to_string(MIGRAPHX_VERSION_MAJOR) +
"." + std::to_string(MIGRAPHX_VERSION_MINOR); "." + std::to_string(MIGRAPHX_VERSION_MINOR) + "." +
std::to_string(MIGRAPHX_VERSION_PATCH) + "." +
MIGRAPHX_STRINGIZE(MIGRAPHX_VERSION_TWEAK);
ap(wrong_commands, {}, ap.metavar("<command>"), ap.append()); ap(wrong_commands, {}, ap.metavar("<command>"), ap.append());
ap(nullptr, {"-h", "--help"}, ap.help("Show help"), ap.show_help(get_command_help())); ap(nullptr, {"-h", "--help"}, ap.help("Show help"), ap.show_help(get_command_help()));
ap(nullptr, ap(nullptr,
......
...@@ -39,36 +39,25 @@ auto get_hash(const T& x) ...@@ -39,36 +39,25 @@ auto get_hash(const T& x)
return std::hash<T>{}(x); return std::hash<T>{}(x);
} }
parameter_map fill_param_map(parameter_map& m, const program& p, const target& t, bool offload) parameter_map fill_param_map(parameter_map& m,
const std::unordered_map<std::string, shape>& param_shapes,
const target& t,
bool offload)
{ {
for(auto&& x : p.get_parameter_shapes()) for(auto&& x : param_shapes)
{ {
argument& arg = m[x.first]; argument& arg = m[x.first];
if(arg.empty()) if(arg.empty())
{
assert(not x.second.dynamic());
arg = generate_argument(x.second, get_hash(x.first)); arg = generate_argument(x.second, get_hash(x.first));
}
if(not offload) if(not offload)
arg = t.copy_to(arg); arg = t.copy_to(arg);
} }
return m; return m;
} }
parameter_map fill_param_map(parameter_map& m, const program& p, bool gpu)
{
for(auto&& x : p.get_parameter_shapes())
{
argument& arg = m[x.first];
if(arg.empty())
arg = generate_argument(x.second, get_hash(x.first));
#ifdef HAVE_GPU
if(gpu)
arg = gpu::to_gpu(arg);
#else
(void)gpu;
#endif
}
return m;
}
parameter_map create_param_map(const program& p, const target& t, bool offload) parameter_map create_param_map(const program& p, const target& t, bool offload)
{ {
parameter_map m; parameter_map m;
......
...@@ -30,8 +30,10 @@ namespace migraphx { ...@@ -30,8 +30,10 @@ namespace migraphx {
namespace driver { namespace driver {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
parameter_map parameter_map fill_param_map(parameter_map& m,
fill_param_map(parameter_map& m, const program& p, const target& t, bool offload = false); const std::unordered_map<std::string, shape>& param_shapes,
const target& t,
bool offload = false);
parameter_map create_param_map(const program& p, const target& t, bool offload = false); parameter_map create_param_map(const program& p, const target& t, bool offload = false);
parameter_map fill_param_map(parameter_map& m, const program& p, bool gpu); parameter_map fill_param_map(parameter_map& m, const program& p, bool gpu);
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include "verify.hpp" #include "verify.hpp"
#include "perf.hpp" #include "perf.hpp"
#include <migraphx/ref/target.hpp> #include <migraphx/register_target.hpp>
#include <migraphx/generate.hpp> #include <migraphx/generate.hpp>
#include <migraphx/verify_args.hpp> #include <migraphx/verify_args.hpp>
#include <migraphx/instruction.hpp> #include <migraphx/instruction.hpp>
...@@ -37,7 +37,7 @@ inline namespace MIGRAPHX_INLINE_NS { ...@@ -37,7 +37,7 @@ inline namespace MIGRAPHX_INLINE_NS {
std::vector<argument> run_ref(program p, const parameter_map& inputs) std::vector<argument> run_ref(program p, const parameter_map& inputs)
{ {
p.compile(ref::target{}); p.compile(migraphx::make_target("ref"));
auto out = p.eval(inputs); auto out = p.eval(inputs);
std::cout << p << std::endl; std::cout << p << std::endl;
return out; return out;
......
...@@ -21,25 +21,44 @@ ...@@ -21,25 +21,44 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE. * THE SOFTWARE.
*/ */
#include <migraphx/manage_ptr.hpp>
#include <migraphx/dynamic_loader.hpp> #include <migraphx/dynamic_loader.hpp>
#include <migraphx/errors.hpp> #include <migraphx/errors.hpp>
#include <migraphx/file_buffer.hpp> #include <migraphx/file_buffer.hpp>
#include <migraphx/tmp_dir.hpp> #include <migraphx/tmp_dir.hpp>
#include <utility> #include <utility>
#include <dlfcn.h> #include <dlfcn.h>
namespace migraphx { namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
// Query (and thereby clear) the pending dlerror() state. When flush is
// false any pending error is fatal; flush=true only clears the state.
void check_load_error(bool flush = false)
{
    char* msg = dlerror(); // always called so the error state is reset
    if(flush or msg == nullptr)
        return;
    MIGRAPHX_THROW("Dynamic loading or symbol lookup failed with " + std::string(msg));
}
struct dynamic_loader_impl struct dynamic_loader_impl
{ {
dynamic_loader_impl() = default; dynamic_loader_impl() = default;
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wignored-attributes"
#endif
dynamic_loader_impl(const fs::path& p, std::shared_ptr<tmp_dir> t = nullptr) dynamic_loader_impl(const fs::path& p, std::shared_ptr<tmp_dir> t = nullptr)
: handle(dlopen(p.string().c_str(), RTLD_LAZY), &dlclose), temp(std::move(t)) : handle(dlopen(p.string().c_str(), RTLD_LAZY),
manage_deleter<decltype(&dlclose), &dlclose>{}),
temp(std::move(t))
{ {
check_load_error();
} }
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif
static std::shared_ptr<dynamic_loader_impl> from_buffer(const char* image, std::size_t size) static std::shared_ptr<dynamic_loader_impl> from_buffer(const char* image, std::size_t size)
{ {
auto t = std::make_shared<tmp_dir>("dloader"); auto t = std::make_shared<tmp_dir>("dloader");
...@@ -52,6 +71,16 @@ struct dynamic_loader_impl ...@@ -52,6 +71,16 @@ struct dynamic_loader_impl
std::shared_ptr<tmp_dir> temp = nullptr; std::shared_ptr<tmp_dir> temp = nullptr;
}; };
// Resolve the filesystem path of the shared object that contains the given
// address; returns an empty path when the address cannot be resolved.
fs::path dynamic_loader::path(void* address)
{
    Dl_info info;
    if(dladdr(address, &info) == 0)
        return {};
    return fs::path{info.dli_fname};
}
dynamic_loader::dynamic_loader(const fs::path& p) : impl(std::make_shared<dynamic_loader_impl>(p)) dynamic_loader::dynamic_loader(const fs::path& p) : impl(std::make_shared<dynamic_loader_impl>(p))
{ {
} }
...@@ -68,10 +97,11 @@ dynamic_loader::dynamic_loader(const std::vector<char>& buffer) ...@@ -68,10 +97,11 @@ dynamic_loader::dynamic_loader(const std::vector<char>& buffer)
std::shared_ptr<void> dynamic_loader::get_symbol(const std::string& name) const std::shared_ptr<void> dynamic_loader::get_symbol(const std::string& name) const
{ {
dlerror(); // flush any previous error messages
check_load_error(true);
void* symbol = dlsym(impl->handle.get(), name.c_str()); void* symbol = dlsym(impl->handle.get(), name.c_str());
if(symbol == nullptr) if(symbol == nullptr)
MIGRAPHX_THROW("Symbol not found: " + name); check_load_error();
return {impl, symbol}; return {impl, symbol};
} }
......
...@@ -28,11 +28,8 @@ ...@@ -28,11 +28,8 @@
#include <migraphx/ranges.hpp> #include <migraphx/ranges.hpp>
#include <migraphx/stringutils.hpp> #include <migraphx/stringutils.hpp>
#include <migraphx/serialize.hpp> #include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp> #include <migraphx/make_op.hpp>
#include <migraphx/pass_config.hpp>
namespace migraphx { namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/fuse_reduce.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/register_op.hpp>
#include <iterator>
#include <map>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
// Operator that carries a fused reduction computation as a single submodule.
struct fused_reduce
{
    // Axes being reduced over; assumed already normalized to non-negative
    // indices before compute_shape runs — TODO confirm with callers.
    std::vector<std::int64_t> axes{};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.axes, "axes"));
    }

    // Validates the submodule against the inputs and computes the output
    // shape: reduction axes collapse to 1 unless the submodule output is
    // already reduced.
    shape compute_shape(const std::vector<shape>& inputs, std::vector<module_ref> mods) const
    {
        if(mods.size() != 1)
            MIGRAPHX_THROW("should have one submodule.");
        auto* sm = mods.front();
        if(sm->get_output_shapes().size() != 1)
            MIGRAPHX_THROW("Only one output supported");
        auto names = sm->get_parameter_names();
        check_shapes{inputs, *this}.has(names.size()).same_ndims();
        std::sort(names.begin(), names.end());
        auto shapes = sm->get_parameter_shapes();
        // Check dimension matches for each input (parameters matched to
        // inputs in sorted-name order)
        if(not equal(names, inputs, [&](const auto& name, const auto& input) {
               return shapes.at(name).lens() == input.lens();
           }))
            MIGRAPHX_THROW("Dimension does not match the submodule.");
        const auto& s = inputs.at(0);
        auto lens = s.lens();
        if(lens != sm->get_output_shapes().front().lens())
        {
            for(const auto& axis : axes)
            {
                lens[axis] = 1;
            }
        }
        return shape::from_permutation(
            sm->get_output_shapes().front().type(), lens, find_permutation(inputs));
    }
    std::string name() const { return "fused_reduce"; }
};
MIGRAPHX_REGISTER_OP(fused_reduce);
static std::unordered_map<instruction_ref, instruction_ref>
get_ins_param_map(const std::vector<instruction_ref>& inputs, const_module_ref sm)
{
std::unordered_map<instruction_ref, instruction_ref> result;
auto names = sm->get_parameter_names();
std::sort(names.begin(), names.end());
assert(names.size() == inputs.size());
std::transform(names.begin(),
names.end(),
inputs.begin(),
std::inserter(result, result.end()),
[&](const auto& name, auto input) {
return std::make_pair(input, sm->get_parameter(name));
});
return result;
}
// Add a parameter to sm for every input of ins that map_ins does not yet
// cover. New parameters are named "x<N>" and use a standard-strided shape
// built from the input's type and lengths.
static void insert_params(module_ref sm,
                          instruction_ref ins,
                          std::unordered_map<instruction_ref, instruction_ref>& map_ins)
{
    auto n = sm->get_parameter_shapes().size();
    for(auto input : ins->inputs())
    {
        if(contains(map_ins, input))
            continue;
        const auto& in_shape = input->get_shape();
        map_ins[input] =
            sm->add_parameter("x" + std::to_string(n++), shape{in_shape.type(), in_shape.lens()});
    }
}
// Copy a single instruction into submodule sm, first adding parameters for
// any of its inputs missing from map_ins; returns the inserted instructions.
static auto insert_ins_in_submodule(module_ref sm,
                                    instruction_ref ins,
                                    std::unordered_map<instruction_ref, instruction_ref>& map_ins)
{
    insert_params(sm, ins, map_ins);
    return sm->add_instructions({ins}, map_ins);
}
// Convenience overload: insert with a fresh (empty) instruction mapping.
static auto insert_ins_in_submodule(module_ref sm, instruction_ref ins)
{
    std::unordered_map<instruction_ref, instruction_ref> map_ins;
    return insert_ins_in_submodule(sm, ins, map_ins);
}
// Inline the submodule attached to ins into sm: add parameters for any of
// ins's unmapped inputs, alias the inner module's parameters to the mapped
// outer inputs, then copy the inner instructions over.
static auto
insert_module_in_submodule(module_ref sm,
                           instruction_ref ins,
                           std::unordered_map<instruction_ref, instruction_ref>& map_ins)
{
    insert_params(sm, ins, map_ins);
    auto* m = ins->module_inputs().front();
    auto param_map = get_ins_param_map(ins->inputs(), m);
    for(auto&& [input, param] : param_map)
    {
        // inner parameter resolves to whatever the outer input maps to in sm
        map_ins[param] = map_ins.at(input);
    }
    return sm->add_instructions(m, map_ins);
}
// Recover the outer-module inputs for submodule sm from map_ins, ordered by
// the submodule's parameter names (the same order used to bind them).
static std::vector<instruction_ref>
find_inputs(module_ref sm,
            const module& parent,
            const std::unordered_map<instruction_ref, instruction_ref>& map_ins)
{
    std::vector<instruction_ref> result;
    // std::map keeps entries sorted by parameter name
    std::map<std::string, instruction_ref> names;
    for(auto&& [input, param] : map_ins)
    {
        // only mappings that became parameters of sm count
        if(not sm->has_instruction(param))
            continue;
        if(param->name() != "@param")
            continue;
        // and whose source instruction lives in the parent module
        if(not parent.has_instruction(input))
            continue;
        auto v = param->get_operator().to_value();
        auto name = v.at("parameter").to<std::string>();
        names[name] = input;
    }
    std::transform(names.begin(), names.end(), std::back_inserter(result), [](const auto& p) {
        return p.second;
    });
    assert(result.size() == sm->get_parameter_shapes().size());
    return result;
}
// Wrap every single-input reduce instruction of the module into its own
// submodule and replace it with an equivalent fused_reduce operator.
static void create_reduce_modules(module_pass_manager& mpm)
{
    std::size_t n = 0;
    for(auto ins : iterator_for(mpm.get_module()))
    {
        // only operators flagged with the "reduce" attribute qualify
        if(not ins->get_operator().attributes().get("reduce", false))
            continue;
        if(ins->inputs().size() != 1)
            continue;
        auto* rm =
            mpm.create_module(mpm.get_module().name() + ":" + ins->name() + std::to_string(n++));
        rm->set_bypass();
        rm->add_return(insert_ins_in_submodule(rm, ins));
        // carry the original reduce axes onto the fused_reduce op
        auto v = ins->get_operator().to_value();
        mpm.get_module().replace_instruction(
            ins, make_op("fused_reduce", {{"axes", v["axes"]}}), ins->inputs(), {rm});
    }
}
// Match a single-use multibroadcast (possibly behind a contiguous), binding
// it as "broadcast" and matching ms against its input.
template <class... Ms>
static auto match_broadcast(Ms... ms)
{
    return match::skip(match::name("contiguous"))(
        match::name("multibroadcast")(match::arg(0)(ms...), match::used_once()).bind("broadcast"));
}
// Match when any input of the instruction satisfies ms; binds it as "input".
template <class... Ms>
static auto any_input(Ms... ms)
{
    return match::any_of[match::inputs()](match::any(ms...).bind("input"));
}
// Match an instruction fed by op either directly or through a broadcast;
// the matched op instruction is bound under the given name.
static auto match_broadcastable_input(const std::string& op, const std::string& name)
{
    auto match_op = match::name(op)(match::used_once()).bind(name);
    auto match_op_input = any_input(match_op, match::used_once());
    auto broadcast_match_op_input = any_input(match_broadcast(match_op), match::used_once());
    return match::any_of(match_op_input, broadcast_match_op_input);
}
namespace {
// Fuse a pointwise producer into the fused_reduce that consumes it: the
// pointwise submodule (and any broadcast in between) is replayed at the top
// of a new reduce submodule which replaces the original fused_reduce.
struct find_pointwise_reduce
{
    auto matcher() const
    {
        return match::name("fused_reduce")(match_broadcastable_input("pointwise", "pointwise"));
    }
    void apply(module_pass_manager& mpm, const match::matcher_result& r) const
    {
        auto reduce = r.result;
        auto input = r.instructions["pointwise"];
        const auto* pm = input->module_inputs().front();
        const auto* old_rm = reduce->module_inputs().front();
        auto* rm = mpm.create_module(pm->name() + ":" + old_rm->name());
        rm->set_bypass();
        std::unordered_map<instruction_ref, instruction_ref> map_ins;
        // Insert pointwise
        auto rins = insert_ins_in_submodule(rm, input, map_ins).front();
        map_ins[input] = rins;
        if(contains(r.instructions, "broadcast"))
        {
            // replay the broadcast between pointwise and reduce as well
            auto broadcast = r.instructions["broadcast"];
            map_ins[broadcast] = insert_ins_in_submodule(rm, broadcast, map_ins).front();
        }
        // Insert fused_reduce
        rm->add_return(insert_module_in_submodule(rm, reduce, map_ins));
        auto new_inputs = find_inputs(rm, mpm.get_module(), map_ins);
        mpm.get_module().replace_instruction(reduce, reduce->get_operator(), new_inputs, {rm});
    }
};
// Fuse a pointwise consumer into the fused_reduce that feeds it: the reduce
// submodule is copied first, then the (possibly broadcast) pointwise is
// appended and becomes the new return.
struct find_reduce_pointwise
{
    auto matcher() const
    {
        return match::name("pointwise")(match_broadcastable_input("fused_reduce", "reduce"));
    }
    void apply(module_pass_manager& mpm, const match::matcher_result& r) const
    {
        auto pw = r.result;
        auto reduce = r.instructions["reduce"];
        auto input = r.instructions["input"];
        const auto* pm = pw->module_inputs().front();
        const auto* old_rm = reduce->module_inputs().front();
        auto* rm = mpm.create_module(old_rm->name() + ":" + pm->name());
        rm->set_bypass();
        std::unordered_map<instruction_ref, instruction_ref> map_ins;
        // Copy the reduce submodule's instructions first
        insert_module_in_submodule(rm, reduce, map_ins);
        if(contains(r.instructions, "broadcast"))
        {
            // re-broadcast the reduce output before feeding the pointwise
            auto broadcast = r.instructions["broadcast"];
            map_ins[broadcast->inputs().front()] = rm->get_returns().front();
            auto bout = insert_ins_in_submodule(rm, broadcast, map_ins);
            map_ins[input] = bout.front();
        }
        else
        {
            map_ins[input] = rm->get_returns().front();
        }
        auto out = insert_ins_in_submodule(rm, pw, map_ins);
        rm->replace_return(out);
        auto new_inputs = find_inputs(rm, mpm.get_module(), map_ins);
        mpm.get_module().replace_instruction(pw, reduce->get_operator(), new_inputs, {rm});
    }
};
// Fuse two chained fused_reduce instructions with identical operators into
// a single submodule (the producer reduce2 feeds the consumer reduce1,
// possibly through a broadcast).
struct find_reduce_reduce
{
    auto matcher() const
    {
        return match::name("fused_reduce")(match_broadcastable_input("fused_reduce", "reduce"));
    }
    void apply(module_pass_manager& mpm, const match::matcher_result& r) const
    {
        auto reduce1 = r.result;
        auto reduce2 = r.instructions["reduce"];
        auto input = r.instructions["input"];
        // only fuse when both reduces perform the same operation
        if(reduce1->get_operator() != reduce2->get_operator())
            return;
        const auto* rm1 = reduce1->module_inputs().front();
        const auto* rm2 = reduce2->module_inputs().front();
        auto* rm = mpm.create_module(rm1->name() + ":" + rm2->name());
        rm->set_bypass();
        std::unordered_map<instruction_ref, instruction_ref> map_ins;
        // Copy the producer (reduce2) instructions first
        insert_module_in_submodule(rm, reduce2, map_ins);
        if(contains(r.instructions, "broadcast"))
        {
            // re-broadcast reduce2's output before feeding reduce1
            auto broadcast = r.instructions["broadcast"];
            map_ins[broadcast->inputs().front()] = rm->get_returns().front();
            auto bout = insert_ins_in_submodule(rm, broadcast, map_ins);
            map_ins[input] = bout.front();
        }
        else
        {
            map_ins[input] = rm->get_returns().front();
        }
        auto out = insert_module_in_submodule(rm, reduce1, map_ins);
        rm->replace_return(out);
        auto new_inputs = find_inputs(rm, mpm.get_module(), map_ins);
        mpm.get_module().replace_instruction(reduce1, reduce1->get_operator(), new_inputs, {rm});
    }
};
} // namespace
// Run the fusion pass: first wrap reduces into submodules, then repeatedly
// apply the three fusion matchers with DCE between rounds.
void fuse_reduce::apply(module_pass_manager& mpm) const
{
    create_reduce_modules(mpm);
    mpm.run_pass(dead_code_elimination{});
    // bounded fixed-point iteration; 4 rounds matches the original pass
    const int rounds = 4;
    for(int round = 0; round < rounds; round++)
    {
        match::find_matches(
            mpm, find_reduce_pointwise{}, find_pointwise_reduce{}, find_reduce_reduce{});
        mpm.run_pass(dead_code_elimination{});
    }
}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
...@@ -27,6 +27,8 @@ ...@@ -27,6 +27,8 @@
#include <migraphx/program.hpp> #include <migraphx/program.hpp>
#include <migraphx/config.hpp> #include <migraphx/config.hpp>
#include <migraphx/register_op.hpp> #include <migraphx/register_op.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/ranges.hpp>
namespace migraphx { namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
...@@ -36,7 +38,27 @@ struct check_context ...@@ -36,7 +38,27 @@ struct check_context
{ {
struct op : auto_register_op<op> struct op : auto_register_op<op>
{ {
std::string name() const { return "check_context::" + get_type_name<T>(); } static std::string compute_op_name()
{
const auto& op_type_name = get_type_name<T>();
const auto& split_name = split_string(op_type_name, ':');
std::vector<std::string> name_without_version = {"check_context"};
// op_type_name would contain internal namespace name with version_x_y_z
// remove version and construct op_name such as check_context::migraphx::gpu::context
std::copy_if(
split_name.begin(),
split_name.end(),
std::back_inserter(name_without_version),
[&](const auto& i) { return not i.empty() and not contains(i, "version"); });
return join_strings(name_without_version, "::");
}
std::string name() const
{
    // computed once per template instantiation; C++11 magic statics make
    // the initialization thread-safe
    static auto op_name = compute_op_name();
    return op_name;
}
shape compute_shape(const std::vector<shape>&) const { return {}; } shape compute_shape(const std::vector<shape>&) const { return {}; }
argument compute(context& ctx, const shape&, const std::vector<argument>&) const argument compute(context& ctx, const shape&, const std::vector<argument>&) const
{ {
......
...@@ -41,6 +41,11 @@ std::vector<shape::dynamic_dimension> compute_broadcasted_dyn_dims(shape s0, sha ...@@ -41,6 +41,11 @@ std::vector<shape::dynamic_dimension> compute_broadcasted_dyn_dims(shape s0, sha
shape common_shape(const std::vector<shape>& shapes); shape common_shape(const std::vector<shape>& shapes);
std::vector<instruction_ref>
insert_common_args(module& m, instruction_ref ins, std::vector<instruction_ref> inputs);
std::vector<instruction_ref> add_common_args(module& m, std::vector<instruction_ref> inputs);
instruction_ref insert_common_op(module& m, instruction_ref insert_common_op(module& m,
instruction_ref ins, instruction_ref ins,
const operation& op, const operation& op,
......
...@@ -32,9 +32,17 @@ inline namespace MIGRAPHX_INLINE_NS { ...@@ -32,9 +32,17 @@ inline namespace MIGRAPHX_INLINE_NS {
struct compile_options struct compile_options
{ {
bool offload_copy = false; /**
* Have MIGX allocate memory for parameters and add instructions
* to copy parameters and output to/from an offload device like a GPU.
*/
bool offload_copy = false;
bool fast_math = true; bool fast_math = true;
bool exhaustive_tune = false; bool exhaustive_tune = false;
/// Use the split_single_dyn_dim pass
bool split_single_dyn_dim = false;
tracer trace{}; tracer trace{};
}; };
......
...@@ -24,22 +24,32 @@ ...@@ -24,22 +24,32 @@
#ifndef MIGRAPHX_GUARD_CONFIG_HPP #ifndef MIGRAPHX_GUARD_CONFIG_HPP
#define MIGRAPHX_GUARD_CONFIG_HPP #define MIGRAPHX_GUARD_CONFIG_HPP
namespace migraphx {
#if !defined(MIGRAPHX_USE_CLANG_TIDY) && !defined(DOXYGEN) #if !defined(MIGRAPHX_USE_CLANG_TIDY) && !defined(DOXYGEN)
#ifdef BUILD_DEV
#define MIGRAPHX_INLINE_NS version_1 #define MIGRAPHX_INLINE_NS version_1
#endif #else
#include <migraphx/version.h>
#define MIGRAPHX_VERSION_PRIMITIVE_CONCAT(x, y) x##_##y
#define MIGRAPHX_VERSION_CONCAT(x, y) MIGRAPHX_VERSION_PRIMITIVE_CONCAT(x, y)
#define MIGRAPHX_VERSION \
MIGRAPHX_VERSION_CONCAT( \
MIGRAPHX_VERSION_CONCAT(MIGRAPHX_VERSION_MAJOR, MIGRAPHX_VERSION_MINOR), \
MIGRAPHX_VERSION_PATCH)
#define MIGRAPHX_INLINE_NS MIGRAPHX_VERSION_CONCAT(version, MIGRAPHX_VERSION)
#endif // build_dev
#endif // clang_tidy
#ifdef DOXYGEN #ifdef DOXYGEN
#define MIGRAPHX_INLINE_NS internal #define MIGRAPHX_INLINE_NS internal
#endif #endif // doxygen
#ifdef MIGRAPHX_USE_CLANG_TIDY #ifdef MIGRAPHX_USE_CLANG_TIDY
#define MIGRAPHX_TIDY_CONST const #define MIGRAPHX_TIDY_CONST const
#else #else
#define MIGRAPHX_TIDY_CONST #define MIGRAPHX_TIDY_CONST
#endif #endif // tidy_const
#endif // clang_tidy
} // namespace migraphx
#endif
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_CONVOLUTION_HPP
#define MIGRAPHX_GUARD_RTGLIB_CONVOLUTION_HPP
#include <migraphx/config.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/par_for.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/tensor_view.hpp>
#include <vector>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
/// Reference (CPU) implementation of an N-dimensional grouped convolution.
///
/// Layout convention (established by the indexing below): dimension 0 is the
/// batch, dimension 1 is the channel, and dimensions 2..N-1 are spatial.
///
/// @param output  destination tensor view; one element computed per parallel task
/// @param input   input tensor view with lens [batch, in_channels, spatial...]
/// @param weights weight tensor view with lens [out_channels, in_channels/group, window...]
/// @param padding per-spatial-dimension padding (implicit zero padding; see bounds check)
/// @param stride  per-spatial-dimension stride
/// @param group   number of convolution groups; assumed to evenly divide out_channels
template <class Output, class T, class Padding, class Stride>
void convolution(Output output, T input, T weights, Padding padding, Stride stride, int group)
{
    auto output_shape = output.get_shape();
    auto in_lens      = input.get_shape().lens();
    auto wei_lens     = weights.get_shape().lens();
    auto wei_n        = wei_lens[0]; // number of output channels
    auto wei_c        = wei_lens[1]; // input channels per group
    // Window iterated per output element: [in_channels_per_group, window spatial dims...]
    std::vector<std::size_t> win_size(wei_lens.begin() + 1, wei_lens.end());
    // One parallel task per output element.
    par_for(output_shape.elements(), [&](auto i) {
        auto idx_o = output_shape.multi(i); // multi-index of this output element
        auto w     = idx_o[1];              // output channel
        auto n_dim = idx_o.size();
        // Top-left corner of the input window for each spatial dimension;
        // signed because padding can push it below zero.
        std::vector<std::ptrdiff_t> win_start;
        for(std::size_t dim = 2; dim < n_dim; ++dim)
        {
            auto d_2 = dim - 2; // spatial index into stride/padding
            win_start.push_back(std::ptrdiff_t(idx_o[dim] * stride[d_2]) -
                                std::ptrdiff_t(padding[d_2]));
        }
        // Group this output channel belongs to (integer division; wei_n assumed
        // divisible by group).
        const auto group_id = w / (wei_n / group);
        shape win_shape{output_shape.type(), win_size};
        // Accumulate in double regardless of tensor type to limit rounding error.
        double acc = 0.0;
        shape_for_each(win_shape, [&](auto idx_win) {
            auto k           = idx_win[0];               // channel offset within the group
            const auto in_ch = group_id * wei_c + k;     // absolute input channel
            // Input multi-index: same batch, remapped channel, window position
            // offset by the window start.
            std::vector<std::ptrdiff_t> idx(idx_o.begin(), idx_o.end());
            idx[1] = in_ch;
            std::transform(idx_win.begin() + 1,
                           idx_win.end(),
                           win_start.begin(),
                           idx.begin() + 2,
                           [](std::ptrdiff_t ii, std::ptrdiff_t jj) { return ii + jj; });
            // Weight multi-index: [output channel, window position...]
            std::vector<std::ptrdiff_t> idx_wei(idx_o.size());
            idx_wei[0] = w;
            std::copy(idx_win.begin(), idx_win.end(), idx_wei.begin() + 1);
            // Skip positions that fall in the (implicit zero) padding region:
            // spatial coordinates must be non-negative and every coordinate must
            // be strictly less than the corresponding input length.
            if(std::all_of(idx.begin() + 2, idx.end(), [&](auto ii) { return ii >= 0; }) and
               std::equal(idx.begin(),
                          idx.end(),
                          in_lens.begin(),
                          in_lens.end(),
                          std::less<std::ptrdiff_t>{}))
            {
                acc += input(idx.begin(), idx.end()) * weights(idx_wei.begin(), idx_wei.end());
            }
        });
        output[i] = acc;
    });
}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
...@@ -77,6 +77,7 @@ struct cpp_generator ...@@ -77,6 +77,7 @@ struct cpp_generator
function& set_types(const module& m); function& set_types(const module& m);
function& set_types(const module& m, const std::function<std::string(shape)>& parse); function& set_types(const module& m, const std::function<std::string(shape)>& parse);
function& set_generic_types(const module& m); function& set_generic_types(const module& m);
function& add_generic_param(const std::string& pname);
}; };
cpp_generator(); cpp_generator();
...@@ -105,6 +106,10 @@ struct cpp_generator ...@@ -105,6 +106,10 @@ struct cpp_generator
std::string create_function(const function& f); std::string create_function(const function& f);
static std::vector<std::string>
to_args(const std::vector<instruction_ref>& inputs,
const std::unordered_map<instruction_ref, std::string>& names);
private: private:
std::unique_ptr<cpp_generator_impl> impl; std::unique_ptr<cpp_generator_impl> impl;
}; };
......
...@@ -37,6 +37,12 @@ struct dynamic_loader_impl; ...@@ -37,6 +37,12 @@ struct dynamic_loader_impl;
struct dynamic_loader struct dynamic_loader
{ {
template <class T>
static fs::path path(T* address)
{
return path(reinterpret_cast<void*>(address));
}
static fs::path path(void* address);
dynamic_loader() = default; dynamic_loader() = default;
dynamic_loader(const fs::path& p); dynamic_loader(const fs::path& p);
......
...@@ -21,18 +21,23 @@ ...@@ -21,18 +21,23 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE. * THE SOFTWARE.
*/ */
#ifndef MIGRAPHX_GUARD_MIGRAPHX_FUSE_REDUCE_HPP
#define MIGRAPHX_GUARD_MIGRAPHX_FUSE_REDUCE_HPP
#ifndef MIGRAPHX_GUARD_PASS_CONFIG_HPP
#define MIGRAPHX_GUARD_PASS_CONFIG_HPP
#include <migraphx/env.hpp>
#include <migraphx/config.hpp> #include <migraphx/config.hpp>
#include <string>
namespace migraphx { namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_MEMORY_COLORING) struct module_pass_manager;
struct fuse_reduce
{
std::string name() const { return "fuse_reduce"; }
void apply(module_pass_manager& mpm) const;
};
} // namespace MIGRAPHX_INLINE_NS } // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx } // namespace migraphx
#endif // MIGRAPHX_GUARD_PASS_CONFIG_HPP #endif // MIGRAPHX_GUARD_MIGRAPHX_FUSE_POINTWISE_HPP
...@@ -347,6 +347,7 @@ match::matcher_result find_match(module& modl, M&& m) ...@@ -347,6 +347,7 @@ match::matcher_result find_match(module& modl, M&& m)
} }
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_TRACE_MATCHES) MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_TRACE_MATCHES)
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_VALIDATE_MATCHES)
/// Find matches for an instruction in the module /// Find matches for an instruction in the module
template <class Mod, class... Ms> template <class Mod, class... Ms>
...@@ -356,7 +357,11 @@ void find_matches(Mod& mod, instruction_ref ins, Ms&&... ms) ...@@ -356,7 +357,11 @@ void find_matches(Mod& mod, instruction_ref ins, Ms&&... ms)
const const
#endif #endif
int trace = value_of(MIGRAPHX_TRACE_MATCHES{}); int trace = value_of(MIGRAPHX_TRACE_MATCHES{});
bool match = false; #if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 5
const
#endif
bool validate = enabled(MIGRAPHX_VALIDATE_MATCHES{});
bool match = false;
each_args( each_args(
[&](auto&& m) { [&](auto&& m) {
if(match) if(match)
...@@ -371,7 +376,20 @@ void find_matches(Mod& mod, instruction_ref ins, Ms&&... ms) ...@@ -371,7 +376,20 @@ void find_matches(Mod& mod, instruction_ref ins, Ms&&... ms)
std::cout << "Matched by " << get_type_name(m) << std::endl; std::cout << "Matched by " << get_type_name(m) << std::endl;
get_module(mod).debug_print(ins); get_module(mod).debug_print(ins);
} }
// If its already invalid dont validate it again
bool invalidated = validate and get_module(mod).validate() != get_module(mod).end();
m.apply(mod, r); m.apply(mod, r);
if(validate and not invalidated)
{
auto invalid = get_module(mod).validate();
if(invalid != get_module(mod).end())
{
std::cout << "Invalid program from match: " << get_type_name(m) << std::endl;
std::cout << "Invalid instructions: " << std::endl;
get_module(mod).debug_print(invalid->inputs());
get_module(mod).debug_print(invalid);
}
}
match = true; match = true;
}, },
ms...); ms...);
......
...@@ -54,10 +54,6 @@ using ins_dep_map = std::unordered_map<instruction_ref, std::unordered_set<ins ...@@ -54,10 +54,6 @@ using ins_dep_map = std::unordered_map<instruction_ref, std::unordered_set<ins
*/ */
struct module struct module
{ {
// used by replace_allocate pass
// allocate memory in this module rather than using output parmaeters
bool use_local_alloc = false;
module(const std::string& name = ""); module(const std::string& name = "");
// move constructor // move constructor
...@@ -182,6 +178,8 @@ struct module ...@@ -182,6 +178,8 @@ struct module
bool has_instruction(instruction_ref ins) const; bool has_instruction(instruction_ref ins) const;
std::vector<instruction_ref> get_returns() const;
std::size_t size() const; std::size_t size() const;
instruction_ref begin() const; instruction_ref begin() const;
instruction_ref end() const; instruction_ref end() const;
......
...@@ -26,10 +26,12 @@ ...@@ -26,10 +26,12 @@
#include <migraphx/config.hpp> #include <migraphx/config.hpp>
#include <migraphx/value.hpp> #include <migraphx/value.hpp>
#include <functional>
namespace migraphx { namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS { inline namespace MIGRAPHX_INLINE_NS {
void to_msgpack(const value& v, std::function<void(const char*, std::size_t)> writer);
std::vector<char> to_msgpack(const value& v); std::vector<char> to_msgpack(const value& v);
value from_msgpack(const std::vector<char>& buffer); value from_msgpack(const std::vector<char>& buffer);
value from_msgpack(const char* buffer, std::size_t size); value from_msgpack(const char* buffer, std::size_t size);
......
...@@ -37,7 +37,7 @@ struct onnx_options ...@@ -37,7 +37,7 @@ struct onnx_options
std::size_t default_dim_value = 0; std::size_t default_dim_value = 0;
/// Default dynamic dimension size (if both default_dim_value and default_dyn_dim_value set /// Default dynamic dimension size (if both default_dim_value and default_dyn_dim_value set
/// parser throws) /// parser throws)
shape::dynamic_dimension default_dyn_dim_value = {1, 1, 0}; shape::dynamic_dimension default_dyn_dim_value = {1, 1};
/// Explicitly specify the dims of an input /// Explicitly specify the dims of an input
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims = {}; std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims = {};
/// Explicitly specify dynamic dims of an input (if both map_input_dims and map_dyn_input_dims /// Explicitly specify dynamic dims of an input (if both map_input_dims and map_dyn_input_dims
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment