Commit 7e297b13 authored by Paul

Merge

parents 86ea5e91 aa7ff911
#ifndef MIGRAPHX_GUARD_RTGLIB_MARKER_ROCTX_HPP
#define MIGRAPHX_GUARD_RTGLIB_MARKER_ROCTX_HPP
#include <migraphx/marker.hpp>
namespace migraphx {
namespace driver {
inline namespace MIGRAPHX_INLINE_NS {
marker create_marker_roctx();
} // namespace MIGRAPHX_INLINE_NS
} // namespace driver
} // namespace migraphx
#endif
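// Usage sketch for the factory declared above. Only create_marker_roctx() is
// part of this diff; the program::mark hook used to attach it is an assumption
// for illustration.
#include <migraphx/program.hpp>
#include "marker_roctx.hpp"

inline void profile_with_roctx(migraphx::program& p, const migraphx::parameter_map& inputs)
{
    auto m = migraphx::driver::create_marker_roctx(); // emits roctx ranges
    p.mark(inputs, std::move(m));                     // assumed attachment point
    p.eval(inputs);
}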
@@ -87,6 +87,6 @@ target get_target(bool gpu)
void compile_program(program& p, bool gpu) { p.compile(get_target(gpu)); }
} // namespace MIGRAPHX_INLINE_NS
} // namespace driver
} // namespace migraphx
#ifndef MIGRAPHX_GUARD_RTGLIB_PRECISION_HPP
#define MIGRAPHX_GUARD_RTGLIB_PRECISION_HPP
namespace migraphx {
namespace driver {
inline namespace MIGRAPHX_INLINE_NS {
enum class precision
{
fp32,
fp16,
int8
};
} // namespace MIGRAPHX_INLINE_NS
} // namespace driver
} // namespace migraphx
#endif
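// A minimal dispatch sketch for the new enum, mirroring the fp16 branch added
// to run_target later in this commit; int8 would need calibration data, so it
// is left unhandled here.
#include <migraphx/program.hpp>
#include <migraphx/quantization.hpp>
#include "precision.hpp"

inline void apply_precision(migraphx::program& p, migraphx::driver::precision q)
{
    if(q == migraphx::driver::precision::fp16)
        migraphx::quantize_fp16(p); // same call run_target uses below
}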
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/apply_alpha_beta.hpp>
#include "models.hpp"
namespace migraphx {
@@ -560,7 +561,7 @@ migraphx::program resnet50(unsigned batch) // NOLINT(readability-function-size)
migraphx::op::relu relu269;
auto mx269 = mm->add_instruction(relu269, mx268);
migraphx::op::pooling pooling270;
pooling270.mode = "max";
pooling270.mode = migraphx::op::pooling_mode::max;
pooling270.padding = {1, 1};
pooling270.stride = {2, 2};
pooling270.lengths = {3, 3};
@@ -1214,7 +1215,7 @@ migraphx::program resnet50(unsigned batch) // NOLINT(readability-function-size)
migraphx::op::relu relu438;
auto mx438 = mm->add_instruction(relu438, mx437);
migraphx::op::pooling pooling439;
pooling439.mode = "average";
pooling439.mode = migraphx::op::pooling_mode::average;
pooling439.padding = {0, 0};
pooling439.stride = {1, 1};
pooling439.lengths = {7, 7};
@@ -1228,10 +1229,10 @@ migraphx::program resnet50(unsigned batch) // NOLINT(readability-function-size)
migraphx::op::multibroadcast multibroadcast442;
multibroadcast442.output_lens = {batch, 1000};
auto mx442 = mm->add_instruction(multibroadcast442, mx0);
migraphx::op::dot dot443;
dot443.alpha = 1;
dot443.beta = 1;
mm->add_instruction(dot443, mx440, mx441, mx442);
float dot443_alpha = 1;
float dot443_beta = 1;
migraphx::add_apply_alpha_beta(
*mm, {mx440, mx441, mx442}, migraphx::make_op("dot"), dot443_alpha, dot443_beta);
return p;
}
......
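// Standalone sketch of the replacement shown above: op::dot no longer carries
// alpha/beta members, so add_apply_alpha_beta wraps a plain "dot" with the
// instructions computing alpha * (A dot B) + beta * C.
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/apply_alpha_beta.hpp>

inline migraphx::program scaled_gemm()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {4, 4}};
    auto a = mm->add_parameter("a", s);
    auto b = mm->add_parameter("b", s);
    auto c = mm->add_parameter("c", s);
    // Equivalent of the old dot op with alpha=2, beta=0.5
    migraphx::add_apply_alpha_beta(*mm, {a, b, c}, migraphx::make_op("dot"), 2.0f, 0.5f);
    return p;
}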
@@ -6,6 +6,7 @@
#include <migraphx/verify_args.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/compile_options.hpp>
#include <migraphx/quantization.hpp>
namespace migraphx {
namespace driver {
@@ -19,9 +20,16 @@ std::vector<argument> run_ref(program p, const parameter_map& inputs)
return out;
}
std::vector<argument>
run_target(program p, const target& t, const compile_options& options, const parameter_map& inputs)
std::vector<argument> run_target(program p,
const target& t,
const compile_options& options,
precision quantize,
const parameter_map& inputs)
{
if(quantize == precision::fp16)
{
quantize_fp16(p);
}
p.compile(t, options);
parameter_map m;
@@ -43,24 +51,24 @@ void verify_program(const std::string& name,
const program& p,
const target& t,
compile_options options,
precision quantize,
const parameter_map& inputs,
double tolerance)
{
auto x = run_ref(p, inputs);
auto y = run_target(p, t, options, inputs);
auto y = run_target(p, t, options, quantize, inputs);
std::size_t output_num = x.size();
for(std::size_t i = 0; i < output_num; ++i)
{
verify_args(name, x[i], y[i], tolerance);
}
// std::cout << "cpu: " << x << std::endl;
// std::cout << "gpu: " << y << std::endl;
}
void verify_instructions(const program& prog,
const target& t,
compile_options options,
precision quantize,
double tolerance)
{
const auto* mm_prog = prog.get_main_module();
@@ -92,7 +100,8 @@ void verify_instructions(const program& prog,
{
std::cout << "Verify: " << ins.name() << std::endl;
std::cout << p << std::endl;
verify_program(ins.name(), p, t, options, create_param_map(p, false), tolerance);
verify_program(
ins.name(), p, t, options, quantize, create_param_map(p, false), tolerance);
}
catch(...)
{
@@ -106,6 +115,7 @@ void verify_reduced(program p,
int n,
const target& t,
compile_options options,
precision quantize,
const parameter_map& inputs,
double tolerance)
{
@@ -114,12 +124,13 @@ void verify_reduced(program p,
mm->remove_instructions(last, mm->end());
std::cout << "Verify: " << std::endl;
std::cout << p << std::endl;
verify_program(std::to_string(n), p, t, options, inputs, tolerance);
verify_program(std::to_string(n), p, t, options, quantize, inputs, tolerance);
}
void verify_reduced_program(const program& p,
const target& t,
compile_options options,
precision quantize,
const parameter_map& inputs,
double tolerance)
{
@@ -127,7 +138,7 @@ void verify_reduced_program(const program& p,
auto n = std::distance(mm->begin(), mm->end());
for(std::size_t i = 0; i < n; i++)
{
verify_reduced(p, i, t, options, inputs, tolerance);
verify_reduced(p, i, t, options, quantize, inputs, tolerance);
}
}
......
#ifndef MIGRAPHX_GUARD_RTGLIB_DRIVER_VERIFY_HPP
#define MIGRAPHX_GUARD_RTGLIB_DRIVER_VERIFY_HPP
#include "precision.hpp"
#include <migraphx/program.hpp>
namespace migraphx {
@@ -11,15 +12,18 @@ void verify_program(const std::string& name,
const program& p,
const target& t,
compile_options options = compile_options{},
precision quantize = precision::fp32,
const parameter_map& inputs = {},
double tolerance = 100);
void verify_instructions(const program& prog,
const target& t,
compile_options options = compile_options{},
precision quantize = precision::fp32,
double tolerance = 80);
void verify_reduced_program(const program& p,
const target& t,
compile_options options = compile_options{},
precision quantize = precision::fp32,
const parameter_map& inputs = {},
double tolerance = 80);
......
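// Call-site sketch for the updated declarations above: quantize now sits
// between the compile options and the inputs, so callers passing inputs
// positionally must add it. make_target("gpu") is an assumption for how the
// target is obtained.
#include <migraphx/register_target.hpp>
#include "verify.hpp"

inline void verify_fp16(const migraphx::program& p)
{
    migraphx::driver::verify_program("model",
                                     p,
                                     migraphx::make_target("gpu"),
                                     migraphx::compile_options{},
                                     migraphx::driver::precision::fp16,
                                     {},  // inputs: default-generated
                                     100);
}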
@@ -45,6 +45,7 @@ dynamic_loader::dynamic_loader(const std::vector<char>& buffer)
std::shared_ptr<void> dynamic_loader::get_symbol(const std::string& name) const
{
dlerror();
void* symbol = dlsym(impl->handle.get(), name.c_str());
if(symbol == nullptr)
MIGRAPHX_THROW("Symbol not found: " + name);
......
@@ -13,13 +13,13 @@
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
void eliminate_allocation::apply(module& p) const
void eliminate_allocation::apply(module& m) const
{
assert(alignment > 0);
std::size_t n = 0;
std::vector<std::pair<instruction_ref, std::size_t>> allocs;
for(auto ins : iterator_for(p))
for(auto ins : iterator_for(m))
{
if(ins->name() != allocation_op)
continue;
@@ -30,13 +30,13 @@ void eliminate_allocation::apply(module& p) const
}
if(n > 0)
{
auto mem = p.add_parameter("memory", shape{shape::int8_type, {n}});
auto mem = m.add_parameter("memory", shape{shape::int8_type, {n}});
for(auto&& pp : allocs)
{
auto ins = pp.first;
auto s = ins->get_shape();
auto offset = pp.second;
p.replace_instruction(
m.replace_instruction(
ins, make_op("load", {{"shape", to_value(s)}, {"offset", offset}}), mem);
}
}
......
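// Invocation sketch: after this pass, every `allocation_op` instruction in m
// is a "load" at some offset into the single "memory" parameter added above.
// The aggregate field order (op name, then alignment) is an assumption.
#include <migraphx/eliminate_allocation.hpp>
#include <migraphx/pass_manager.hpp>

inline void pack_allocations(migraphx::module& m)
{
    migraphx::run_passes(m, {migraphx::eliminate_allocation{"allocate", 32}});
}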
@@ -11,7 +11,7 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
template <class Range>
void cse_range(module& p, Range&& r)
void cse_range(module& m, Range&& r)
{
std::unordered_multimap<std::string, instruction_ref> instructions;
std::unordered_set<instruction_ref> processed_ins;
@@ -30,19 +30,24 @@ void cse_range(module& p, Range&& r)
continue;
if(*eq != *ins)
continue;
p.replace_instruction(ins, eq);
m.replace_instruction(ins, eq);
processed_ins.emplace(ins);
auto outputs = eq->outputs();
std::vector<instruction_ref> outputs;
std::copy_if(eq->outputs().begin(),
eq->outputs().end(),
std::back_inserter(outputs),
[&](auto x) { return m.has_instruction(x); });
std::sort(outputs.begin(), outputs.end(), [&](auto x, auto y) {
return std::distance(eq, x) < std::distance(eq, y);
});
cse_range(p, outputs);
cse_range(m, outputs);
}
instructions.emplace(ins->name(), ins);
}
}
void eliminate_common_subexpression::apply(module& p) const { cse_range(p, iterator_for(p)); }
void eliminate_common_subexpression::apply(module& m) const { cse_range(m, iterator_for(m)); }
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
@@ -13,9 +13,9 @@
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
void eliminate_concat::apply(module& p) const
void eliminate_concat::apply(module& m) const
{
for(auto ins : iterator_for(p))
for(auto ins : iterator_for(m))
{
// Look for the concat operator
if(ins->name() != concat_opt.name())
@@ -64,22 +64,22 @@ void eliminate_concat::apply(module& p) const
std::sort(sorted_allocations.begin(),
sorted_allocations.end(),
[&](instruction_ref x, instruction_ref y) {
return std::distance(p.begin(), x) < std::distance(p.begin(), y);
return std::distance(m.begin(), x) < std::distance(m.begin(), y);
});
// Move "super" allocation to the front
auto first = sorted_allocations.front();
auto super = p.move_instruction(last, first);
auto super = m.move_instruction(last, first);
// Replace each allocation with a load
std::size_t offset = 0;
for(auto alloc : allocations)
{
op::load op{alloc->get_shape(), offset};
p.replace_instruction(alloc, op, {super});
m.replace_instruction(alloc, op, {super});
offset += alloc->get_shape().bytes();
}
std::vector<instruction_ref> args = {super};
std::copy(ins->inputs().begin(), ins->inputs().end() - 1, std::back_inserter(args));
p.replace_instruction(ins, migraphx::make_op("identity"), args);
m.replace_instruction(ins, migraphx::make_op("identity"), args);
}
}
}
......
@@ -6,16 +6,19 @@
#include <migraphx/stringutils.hpp>
#include <migraphx/op/contiguous.hpp>
#include <migraphx/op/identity.hpp>
#include <migraphx/par_for.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
static bool try_compute_shape(instruction_ref ins, const std::vector<shape>& inputs)
static bool try_compute_shape(instruction_ref ins,
const std::vector<shape>& inputs,
const std::vector<module_ref>& mods)
{
try
{
shape new_shape = ins->get_operator().compute_shape(inputs);
shape new_shape = ins->get_operator().compute_shape(inputs, mods);
// If the output shape is a standard shape, no need to try its output
if(new_shape.standard())
{
@@ -45,7 +48,7 @@ static bool try_compute_shape(instruction_ref ins, const std::vector<shape>& inputs)
return (arg == ins) ? new_shape : arg->get_shape();
});
if(!try_compute_shape(output, input_shapes))
if(!try_compute_shape(output, input_shapes, mods))
{
return false;
}
@@ -59,64 +62,60 @@ static bool try_compute_shape(instruction_ref ins, const std::vector<shape>& inputs)
return true;
}
static bool try_compute_shape(instruction_ref ins, const std::vector<instruction_ref>& args)
static bool try_compute_shape(instruction_ref ins,
const std::vector<instruction_ref>& args,
const std::vector<module_ref>& mods)
{
auto inputs = to_shapes(args);
return try_compute_shape(ins, inputs);
return try_compute_shape(ins, inputs, mods);
}
void eliminate_contiguous::apply(module& p) const
void eliminate_contiguous::apply(module& m) const
{
for(auto ins : iterator_for(p))
std::vector<instruction_ref> const_instruction;
for(auto ins : iterator_for(m))
{
// return instruction should have inputs with standard shape
if(ins->name() == "@return")
continue;
if(std::none_of(ins->inputs().begin(), ins->inputs().end(), [&](auto arg) {
return arg->name() == op_name;
}))
continue;
// Make a copy so we can modify it while we iterate
auto args = ins->inputs();
auto new_args = args;
std::transform(new_args.begin(), new_args.end(), new_args.begin(), [&](auto arg) {
if(arg->name() == op_name)
return arg->inputs().front();
else
return arg;
});
assert(args.size() == new_args.size());
auto mod_args = ins->module_inputs();
if(try_compute_shape(ins, new_args))
for(auto arg : ins->inputs())
{
p.replace_instruction(ins, ins->get_operator(), new_args);
}
else
{
for(auto arg : ins->inputs())
if(arg->name() == op_name)
{
if(arg->name() == op_name)
auto prev = arg->inputs().front();
replace(new_args, arg, prev);
if(try_compute_shape(ins, new_args, mod_args))
{
new_args = args;
auto prev = arg->inputs().front();
replace(new_args, arg, prev);
if(try_compute_shape(ins, new_args))
{
instruction::replace_argument(ins, arg, prev);
}
else if(prev->can_eval())
{
auto c = op::contiguous{};
auto r = c.compute(c.compute_shape({prev->get_shape()}), {prev->eval()});
auto l = p.add_literal(r.get_shape(), r.data());
p.replace_instruction(arg, l);
}
instruction::replace_argument(ins, arg, prev);
}
else if(prev->can_eval())
{
const_instruction.push_back(arg);
}
}
}
}
// Perform evaluations in parallel
std::vector<argument> literals(const_instruction.size());
par_for(const_instruction.size(), 1, [&](const auto i) {
auto c = op::contiguous{};
auto prev = const_instruction[i]->inputs().front();
literals[i] = c.compute(c.compute_shape({prev->get_shape()}), {prev->eval()});
});
for(size_t i = 0; i < const_instruction.size(); i++)
{
auto l = m.add_literal(literals[i].get_shape(), literals[i].data());
m.replace_instruction(const_instruction[i], l);
}
}
} // namespace MIGRAPHX_INLINE_NS
......
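// Pattern sketch for the change above: evaluate independent constants in
// parallel with par_for, then commit the results to the module serially,
// since module mutation is not thread-safe. The 1 is taken to be the minimum
// grain size per worker.
#include <migraphx/par_for.hpp>
#include <cstddef>
#include <vector>

inline std::vector<double> parallel_map(std::size_t n)
{
    std::vector<double> out(n);
    migraphx::par_for(n, 1, [&](const auto i) { out[i] = i * 2.0; });
    return out;
}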
@@ -3,17 +3,26 @@
#include <migraphx/iterator_for.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/ranges.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
void eliminate_data_type::apply(module& m) const
{
static const std::vector<std::string> skip_op_names = {"convert",
"get_tuple_elem",
"if",
"loop",
"roialign",
"scatternd_add",
"scatternd_mul",
"scatternd_none"};
for(auto ins : iterator_for(m))
{
if(ins->name()[0] == '@')
continue;
if(ins->name() == "convert")
if(contains(skip_op_names, ins->name()))
continue;
auto inputs = ins->inputs();
std::transform(inputs.begin(), inputs.end(), inputs.begin(), [&](auto i) {
......
@@ -8,21 +8,21 @@
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
void eliminate_identity::apply(module& p) const
void eliminate_identity::apply(module& m) const
{
auto last = std::prev(p.end());
for(auto ins : iterator_for(p))
auto last = std::prev(m.end());
for(auto ins : iterator_for(m))
{
// Skip the first instruction, since we always process the previous
// instruction
if(ins == p.begin())
if(ins == m.begin())
continue;
const auto i = std::prev(ins);
if(i->name() == "identity")
{
p.replace_instruction(i, i->inputs().front());
p.move_instruction(i, p.end());
m.replace_instruction(i, i->inputs().front());
m.move_instruction(i, m.end());
}
if(ins == last)
{
@@ -31,7 +31,7 @@ void eliminate_identity::apply(module& p) const
const instruction_ref& identity_input = ins->inputs().front();
if(identity_input->outputs().size() == 1)
{
p.move_instruction(identity_input, i);
m.move_instruction(identity_input, i);
// since this is the last instruction, removing it only
// requires changing "last" and calling remove below
last = std::prev(last);
@@ -40,7 +40,7 @@ void eliminate_identity::apply(module& p) const
break;
}
}
p.remove_instructions(std::next(last), p.end());
m.remove_instructions(std::next(last), m.end());
}
} // namespace MIGRAPHX_INLINE_NS
......
@@ -44,7 +44,7 @@ static void update_op(const instruction_ref& input, const instruction_ref& ins,
static void update_pooling(const instruction_ref& input, const instruction_ref& ins, module& m)
{
auto op = any_cast<op::pooling>(ins->get_operator());
if(op.mode == "average")
if(op.mode == op::pooling_mode::average)
{
return;
}
......
#include <migraphx/fuse_pointwise.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/ranges.hpp>
#include <iterator>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
static literal get_scalar(instruction_ref ins)
{
if(ins->name() == "contiguous")
return get_scalar(ins->inputs().front());
const auto& s = ins->get_shape();
if(not(s.elements() == 1 or s.scalar()))
return {};
if(not ins->can_eval())
return {};
auto e = ins->eval();
literal r{};
e.visit_at([&](auto x) { r = literal{x}; });
return r;
}
static void create_pointwise_modules(module_pass_manager& mpm)
{
std::size_t n = 0;
for(auto ins : iterator_for(mpm.get_module()))
{
if(not ins->get_operator().attributes().get("pointwise", false))
continue;
assert(ins->get_operator().attributes().contains("point_op"));
auto* pm = mpm.create_module(mpm.get_module().name() + ":pointwise" + std::to_string(n++));
pm->set_bypass();
std::unordered_map<instruction_ref, instruction_ref> param_map;
std::vector<instruction_ref> pointwise_inputs;
std::size_t i = 0;
for(auto input : ins->inputs())
{
if(contains(param_map, input))
continue;
auto scalar = get_scalar(input);
if(scalar.empty())
{
pointwise_inputs.push_back(input);
param_map[input] =
pm->add_parameter("x" + std::to_string(i), shape{input->get_shape().type()});
i++;
}
else
{
param_map[input] = pm->add_literal(scalar);
}
}
std::vector<instruction_ref> inputs;
std::transform(ins->inputs().begin(),
ins->inputs().end(),
std::back_inserter(inputs),
[&](auto input) { return param_map[input]; });
auto r = pm->add_instruction(ins->get_operator(), inputs);
pm->add_return({r});
mpm.get_module().replace_instruction(ins, make_op("pointwise"), pointwise_inputs, {pm});
}
}
static std::vector<instruction_ref> append_pointwise_module(instruction_ref ins,
instruction_ref output)
{
assert(contains(output->inputs(), ins));
module_ref pm = ins->module_inputs().at(0);
module_ref xm = output->module_inputs().at(0);
auto last = std::prev(pm->end());
assert(last->name() == "@return");
assert(last->inputs().size() == 1);
assert(pm->get_parameter_names().size() == ins->inputs().size());
assert(xm->get_parameter_names().size() == output->inputs().size());
std::vector<instruction_ref> inputs = ins->inputs();
std::unordered_map<instruction_ref, instruction_ref> map_ins;
std::unordered_map<instruction_ref, instruction_ref> input_map;
// Copy inputs to input_map
for(auto i : range(inputs.size()))
{
auto input = inputs[i];
auto param = pm->get_parameter("x" + std::to_string(i));
assert(param != pm->end());
input_map[input] = param;
}
// Add the new parameter and additional inputs
for(auto i : range(output->inputs().size()))
{
auto input = output->inputs()[i];
auto param = xm->get_parameter("x" + std::to_string(i));
assert(param != xm->end());
if(input == ins)
{
map_ins[param] = last->inputs().front();
input_map[input] = map_ins[param];
}
// Avoid duplicate parameter inputs
else if(contains(input_map, input))
{
map_ins[param] = input_map[input];
}
else
{
map_ins[param] =
pm->add_parameter("x" + std::to_string(inputs.size()), {input->get_shape().type()});
inputs.push_back(input);
input_map[input] = map_ins[param];
}
}
pm->replace_return(pm->insert_module_instructions(last, xm, map_ins));
return inputs;
}
static bool find_pointwise_modules(module& m)
{
bool changed = false;
auto last = std::prev(m.end());
for(auto ins : iterator_for(m))
{
if(ins->name() != "pointwise")
continue;
if(ins->outputs().empty() and ins != last)
continue;
auto it = std::find_if(ins->inputs().begin(), ins->inputs().end(), [&](auto i) {
return i->name() == "pointwise" and i->outputs().size() == 1;
});
if(it == ins->inputs().end())
continue;
auto input = *it;
auto new_inputs = append_pointwise_module(input, ins);
m.replace_instruction(input, input->get_operator(), new_inputs, input->module_inputs());
m.replace_instruction(ins, input);
m.move_instruction(input, ins);
changed = true;
}
return changed;
}
void fuse_pointwise::apply(module_pass_manager& mpm) const
{
create_pointwise_modules(mpm);
mpm.run_pass(dead_code_elimination{});
for(int i = 0; i < 8; i++)
{
if(not find_pointwise_modules(mpm.get_module()))
break;
mpm.run_pass(dead_code_elimination{});
}
}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
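// Invocation sketch for the new pass: run_passes drives module_pass_manager
// passes over the whole program, and the dead_code_elimination interleaving
// happens inside apply() as shown above.
#include <migraphx/fuse_pointwise.hpp>
#include <migraphx/pass_manager.hpp>

inline void fuse(migraphx::program& p)
{
    migraphx::run_passes(p, {migraphx::fuse_pointwise{}});
}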
@@ -6,32 +6,59 @@ inline namespace MIGRAPHX_INLINE_NS {
argument fill_argument(shape s, unsigned long value)
{
argument result;
s.visit_type([&](auto as) {
using type = typename decltype(as)::type;
auto v = fill_tensor_data<type>(s, value);
result = {s, v};
});
if(s.type() == shape::tuple_type)
{
std::vector<argument> sub_args;
const auto& sub_ss = s.sub_shapes();
std::transform(sub_ss.begin(), sub_ss.end(), std::back_inserter(sub_args), [&](auto ss) {
return fill_argument(ss, value);
});
result = argument(sub_args);
}
else
{
s.visit_type([&](auto as) {
using type = typename decltype(as)::type;
auto v = fill_tensor_data<type>(s, value);
result = {s, v};
});
}
return result;
}
argument generate_argument(shape s, unsigned long seed)
{
argument result;
s.visit_type([&](auto as) {
// we use char type to store bool type internally, so bool_type
// needs special processing to generate data
if(s.type() == shape::bool_type)
{
auto v = generate_tensor_data<bool>(s, seed);
result = {s, v};
}
else
{
using type = typename decltype(as)::type;
auto v = generate_tensor_data<type>(s, seed);
result = {s, v};
}
});
if(s.type() == shape::tuple_type)
{
const auto& sub_ss = s.sub_shapes();
std::vector<argument> sub_args;
std::transform(sub_ss.begin(), sub_ss.end(), std::back_inserter(sub_args), [&](auto ss) {
return generate_argument(ss, seed);
});
result = argument(sub_args);
}
else
{
s.visit_type([&](auto as) {
// we use char type to store bool type internally, so bool_type
// needs special processing to generate data
if(s.type() == shape::bool_type)
{
auto v = generate_tensor_data<bool>(s, seed);
result = {s, v};
}
else
{
using type = typename decltype(as)::type;
auto v = generate_tensor_data<type>(s, seed);
result = {s, v};
}
});
}
return result;
}
......
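// Sketch of what the new tuple branches enable: generating data for a
// tuple-typed shape by recursing into each sub-shape. The vector-of-shapes
// constructor for tuple shapes is assumed here.
#include <migraphx/generate.hpp>

inline migraphx::argument tuple_data(unsigned long seed)
{
    migraphx::shape s0{migraphx::shape::float_type, {2, 3}};
    migraphx::shape s1{migraphx::shape::bool_type, {4}};
    migraphx::shape tup{{s0, s1}}; // tuple_type shape
    return migraphx::generate_argument(tup, seed);
}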
@@ -13,7 +13,7 @@ struct adjust_allocation
{
allocation_model model;
std::string name() const { return "adjust_allocation"; }
void apply(module& p) const;
void apply(module& m) const;
};
} // namespace MIGRAPHX_INLINE_NS
......
@@ -32,18 +32,22 @@ struct allocation_model
#else
/*
* Type-erased interface for:
*
* struct allocation_model
* {
* std::string name() const;
* std::string copy() const;
* operation allocate(const shape& s) const;
* operation preallocate(const shape& s,std::string id) const;
* };
*
*/
#ifdef TYPE_ERASED_DECLARATION
// Type-erased interface for:
struct allocation_model
{
//
std::string name() const;
//
std::string copy() const;
//
operation allocate(const shape& s) const;
//
operation preallocate(const shape& s, std::string id) const;
};
#else
struct allocation_model
{
@@ -260,6 +264,7 @@ inline const ValueType& any_cast(const allocation_model& x)
throw std::bad_cast();
return *y;
}
#endif
#endif
......
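// Sketch of a concrete model satisfying the interface listed above; the op
// names and the "tag" attribute are placeholders, while the to_value-based
// attribute encoding follows the pattern used elsewhere in this commit.
#include <migraphx/make_op.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/shape.hpp>
#include <string>
#include <utility>

struct example_allocation_model
{
    std::string name() const { return "allocate"; }
    std::string copy() const { return "copy"; }
    migraphx::operation allocate(const migraphx::shape& s) const
    {
        return migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}});
    }
    migraphx::operation preallocate(const migraphx::shape& s, std::string id) const
    {
        return migraphx::make_op("allocate",
                                 {{"shape", migraphx::to_value(s)}, {"tag", std::move(id)}});
    }
};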
@@ -16,7 +16,7 @@ struct stream_race
instruction_ref before;
};
std::vector<stream_race> analyze_streams(const module& p, const stream_model& m);
std::vector<stream_race> analyze_streams(const module& m, const stream_model& strmm);
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......
#ifndef MIGRAPHX_GUARD_MIGRAPHX_ANY_PTR_HPP
#define MIGRAPHX_GUARD_MIGRAPHX_ANY_PTR_HPP
#include <migraphx/config.hpp>
#include <migraphx/optional.hpp>
#include <migraphx/errors.hpp>
#include <migraphx/type_name.hpp>
#include <cassert>
#include <string_view>
#include <typeindex>
#include <type_traits>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
struct any_ptr
{
any_ptr() = default;
template <class T>
any_ptr(T* p) : ptr(p), ti(typeid(T*)), name(get_name<T*>())
{
}
any_ptr(void* p, std::string_view pname) : ptr(p), name(pname) {}
void* get(std::string_view n) const
{
if(name != n)
MIGRAPHX_THROW("any_ptr: type mismatch: " + std::string{name} +
" != " + std::string{n});
return ptr;
}
template <class T>
T get() const
{
static_assert(std::is_pointer<T>{}, "Must be a pointer");
assert(ptr != nullptr);
if(ti and std::type_index{typeid(T)} != *ti)
MIGRAPHX_THROW("any_ptr: type mismatch: " + std::string{name} + " != " + get_name<T>());
else if(name != get_name<T>())
MIGRAPHX_THROW("any_ptr: type mismatch: " + std::string{name} + " != " + get_name<T>());
return reinterpret_cast<T>(ptr);
}
void* unsafe_get() const { return ptr; }
private:
void* ptr = nullptr;
optional<std::type_index> ti = nullopt;
std::string_view name = "";
template <class T>
static const std::string& get_name()
{
return get_type_name<std::remove_cv_t<std::remove_pointer_t<T>>>();
}
};
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif // MIGRAPHX_GUARD_MIGRAPHX_ANY_PTR_HPP
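// Usage sketch for the two lookup paths above: the typed constructor records a
// type_index that get<T>() checks, while the (void*, name) constructor only
// supports the string-keyed lookup.
#include <migraphx/any_ptr.hpp>
#include <cassert>

inline void any_ptr_demo()
{
    int x = 1;
    migraphx::any_ptr p{&x};
    int* y = p.get<int*>(); // ok: recorded type matches
    assert(*y == 1);
    // p.get<float*>();     // would throw: type mismatch

    float f = 2.0f;
    migraphx::any_ptr q{&f, "hipStream_t"}; // name-only handle
    assert(q.get("hipStream_t") == &f);
}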