Unverified Commit 94a7f6ee authored by Charlie Lin, committed by GitHub

select_module refactor (#1615)

Refactor to have select_module use output parameters
Disable select_module verify tests on cpu
parent 11e2451f
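
For context on the refactor: the reworked select_module operator distinguishes input parameters from output parameters purely by name, treating any parameter whose name contains "#output_" as a caller-supplied output buffer (see get_input_parameter_names / get_output_parameter_names in the diff below). A minimal standalone sketch of that split, using plain std::string names rather than MIGraphX types (the example parameter names are illustrative only):

    #include <algorithm>
    #include <iterator>
    #include <string>
    #include <vector>

    // Output parameters are identified by the "#output_" marker in their name,
    // mirroring the convention the refactored select_module relies on.
    bool is_output_param(const std::string& name)
    {
        return name.find("#output_") != std::string::npos;
    }

    std::vector<std::string> input_parameter_names(const std::vector<std::string>& names)
    {
        std::vector<std::string> ret;
        std::copy_if(names.begin(), names.end(), std::back_inserter(ret), [](const std::string& n) {
            return not is_output_param(n);
        });
        return ret;
    }

    std::vector<std::string> output_parameter_names(const std::vector<std::string>& names)
    {
        std::vector<std::string> ret;
        std::copy_if(names.begin(), names.end(), std::back_inserter(ret), [](const std::string& n) {
            return is_output_param(n);
        });
        return ret;
    }

    int main()
    {
        // "#output_0" is an illustrative name; the real check is only the substring match.
        std::vector<std::string> names = {"data", "#output_0"};
        return (input_parameter_names(names).size() == 1 and
                output_parameter_names(names).size() == 1)
                   ? 0
                   : 1;
    }
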
......@@ -54,10 +54,6 @@ using ins_dep_map = std::unordered_map<instruction_ref, std::unordered_set<ins
*/
struct module
{
// used by replace_allocate pass
// allocate memory in this module rather than using output parameters
bool use_local_alloc = false;
module(const std::string& name = "");
// move constructor
......
......@@ -44,7 +44,7 @@ struct allocate
std::string name() const { return "allocate"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
migraphx::check_shapes{inputs, *this}.has(0);
migraphx::check_shapes{inputs, *this, true}.has(0);
return s;
}
argument compute(const shape& output_shape, const std::vector<argument>&) const
......
......@@ -43,50 +43,100 @@ struct select_module
std::string name() const { return "select_module"; }
shape compute_shape(const std::vector<shape>&, const std::vector<module_ref>&) const
shape compute_shape(const std::vector<shape>& inputs, const std::vector<module_ref>&) const
{
check_shapes{inputs, *this, true}.has_at_least(1);
return shape{output_dyn_shapes};
}
std::vector<std::string> get_input_parameter_names(module_ref mod) const
{
auto param_names = mod->get_parameter_names();
std::vector<std::string> ret;
std::copy_if(param_names.cbegin(),
param_names.cend(),
std::back_inserter(ret),
[](auto pn) { return not contains(pn, "#output_"); });
return ret;
}
std::vector<std::string> get_output_parameter_names(module_ref mod) const
{
auto param_names = mod->get_parameter_names();
std::vector<std::string> ret;
std::copy_if(param_names.cbegin(),
param_names.cend(),
std::back_inserter(ret),
[](auto pn) { return contains(pn, "#output_"); });
return ret;
}
argument compute(const shape&,
const std::vector<argument>& args,
const std::vector<module_ref>& submodule_list,
const std::function<std::vector<argument>(
module_ref&, const std::unordered_map<std::string, argument>&)>& run) const
{
// find submodule with input parameter shapes exactly the same as the input arguments
// assuming arguments are in the same order as the input parameters
// Find submodule with input parameter shapes exactly the same as the input instruction
// arguments. Assuming instruction arguments are in the same order as the instruction
// parameters.
auto module_iter =
std::find_if(submodule_list.cbegin(), submodule_list.cend(), [&](module_ref mr) {
auto param_names = mr->get_parameter_names();
assert(param_names.size() <= args.size());
return std::equal(param_names.cbegin(),
param_names.cend(),
args.cbegin(),
[&](auto p_name, auto a) {
return a.get_shape() == mr->get_parameter_shape(p_name);
});
auto in_param_names = get_input_parameter_names(mr);
auto param_shapes = mr->get_parameter_shapes();
assert(in_param_names.size() <= args.size());
return std::equal(
in_param_names.cbegin(),
in_param_names.cend(),
args.cbegin(),
[&](auto p_name, auto a) { return a.get_shape() == param_shapes[p_name]; });
});
if(module_iter == submodule_list.end())
{
MIGRAPHX_THROW("SELECT_MODULE: no compatible submodules found for given input shapes");
}
auto* module_to_run = *module_iter;
std::unordered_map<std::string, argument> params;
std::unordered_map<std::string, argument> p_map;
// add input parameters
auto param_names = module_to_run->get_parameter_names();
assert(param_names.size() <= args.size());
std::transform(param_names.begin(),
param_names.end(),
// add input parameters to parameter_map
auto in_param_names = get_input_parameter_names(module_to_run);
assert(in_param_names.size() <= args.size());
std::transform(in_param_names.begin(),
in_param_names.end(),
args.begin(),
std::inserter(params, params.end()),
[](auto&& name, auto&& a) { return std::make_pair(name, a); });
std::inserter(p_map, p_map.end()),
[&](auto&& name, auto&& a) { return std::make_pair(name, a); });
auto results = run(module_to_run, params);
// One tuple output parameter in main module to multiple output parameters in submodule
auto out_param_names = get_output_parameter_names(module_to_run);
auto output_sub_objects = args.back().get_sub_objects();
assert(out_param_names.size() == output_sub_objects.size());
std::transform(out_param_names.begin(),
out_param_names.end(),
output_sub_objects.begin(),
std::inserter(p_map, p_map.end()),
[&](auto&& name, auto&& a) {
auto ps = module_to_run->get_parameter_shape(name);
if(a.get_shape() != ps)
{
assert(ps.bytes() == a.get_shape().bytes());
return std::make_pair(name, a.reshape(ps));
}
else
{
return std::make_pair(name, a);
}
});
auto results = run(module_to_run, p_map);
return argument{results};
}
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
return shapes.size() - 1;
}
};
} // namespace op
......
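
To make the selection step above easier to follow in isolation, here is a rough standalone sketch of the shape-matching logic, with placeholder types standing in for migraphx::shape, migraphx::argument, and module_ref (nothing here is the actual MIGraphX API):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Placeholder stand-in; a real migraphx::shape also carries type and strides.
    using shape_t = std::vector<std::size_t>;

    struct fake_module
    {
        std::vector<std::string> input_params;                  // ordered input parameter names
        std::unordered_map<std::string, shape_t> param_shapes;  // parameter name -> shape
    };

    // Pick the first submodule whose input parameter shapes equal the incoming
    // argument shapes, assuming arguments arrive in the same order as the
    // parameters (the same strategy select_module::compute uses above).
    const fake_module* select_submodule(const std::vector<fake_module>& submodules,
                                        const std::vector<shape_t>& arg_shapes)
    {
        auto it = std::find_if(submodules.begin(), submodules.end(), [&](const fake_module& m) {
            if(m.input_params.size() > arg_shapes.size())
                return false;
            return std::equal(m.input_params.begin(),
                              m.input_params.end(),
                              arg_shapes.begin(),
                              [&](const std::string& name, const shape_t& s) {
                                  return m.param_shapes.at(name) == s;
                              });
        });
        return it == submodules.end() ? nullptr : &*it;
    }

    int main()
    {
        fake_module batch1{{"data"}, {{"data", {1, 4}}}};
        fake_module batch2{{"data"}, {{"data", {2, 4}}}};
        std::vector<fake_module> submodules = {batch1, batch2};
        std::vector<shape_t> arg_shapes     = {{2, 4}};
        const fake_module* chosen           = select_submodule(submodules, arg_shapes);
        assert(chosen != nullptr);
        assert(chosen->param_shapes.at("data") == (shape_t{2, 4}));
    }
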
......@@ -166,6 +166,7 @@ void module::assign(const module& m)
auto s = ins->get_shape();
copy_ins = impl->insert(impl->instructions.end(),
{builtin::param{name, order}, std::move(s), {}});
impl->nparams++;
}
else if(ins->name() == "@outline")
{
......
......@@ -104,8 +104,7 @@ void replace_allocate::apply(module& m) const
continue;
auto s = ins->get_shape();
if(not(main_offload_copy or m.use_local_alloc) and model.needs_out_params() and
contains(mod_output_names, ins))
if(not main_offload_copy and model.needs_out_params() and contains(mod_output_names, ins))
{
auto out_param = m.add_parameter(mod_output_names[ins], s);
m.replace_instruction(ins, out_param);
......
......@@ -327,10 +327,10 @@ struct stream_info
return [=](auto f) {
return fix<bool>([&](auto self, auto ins) {
return all_of(select(ins), [&](auto i) {
if(iweights.at(i) == 0)
return self(i);
else
if(has_stream(i))
return f(this->get_stream(i));
else
return self(i);
});
})(start);
};
......
......@@ -98,6 +98,13 @@ struct hip_sync_stream
return {};
return args.front();
}
std::ptrdiff_t output_alias(const std::vector<shape>& args) const
{
if(args.empty())
return -1;
return 0;
}
};
struct hip_copy_to_gpu
......
......@@ -361,29 +361,16 @@ struct miopen_apply
}
/**
* Turns on use_local_alloc in the select_module submodules.
* Changes the submodule returns to a hip::sync_stream.
* Adds dynamic allocation for the submodule output parameter.
*/
void add_select_module_op()
{
apply_map.emplace("select_module", [=](instruction_ref ins) {
auto s = ins->get_shape();
auto output = insert_allocation(ins, s);
std::vector<instruction_ref> inputs = ins->inputs();
auto mod_args = ins->module_inputs();
for(auto* smod : mod_args)
{
smod->use_local_alloc = true;
auto last_ins = std::prev(smod->end());
if(last_ins->name() == "@return")
{
for(auto out_ins : last_ins->inputs())
{
auto sync_out = smod->insert_instruction(
last_ins, make_op("hip::sync_stream"), out_ins);
smod->replace_return({sync_out});
}
}
}
return ins;
inputs.push_back(output);
return mod->replace_instruction(ins, ins->get_operator(), inputs, ins->module_inputs());
});
}
};
......
......@@ -67,7 +67,13 @@ int main(int argc, const char* argv[])
{
run_verify rv;
rv.add_validation_for("gpu", &validate_gpu);
rv.disable_test_for("cpu", {"test_if_lp", "test_if_param", "test_if_literal"});
rv.disable_test_for("cpu",
{"test_if_lp",
"test_if_param",
"test_if_literal",
"test_select_module_add",
"test_select_module_reduce",
"test_select_module_conv"});
rv.disable_test_for("gpu", {"test_conv_bn_add"});
rv.run(argc, argv);
}
......@@ -43,9 +43,11 @@ struct test_select_module_add : verify_program<test_select_module_add>
auto sm_input = submod->add_parameter("data", sm_shape);
auto broadcast_lit =
submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
auto add_ins =
auto add_ins0 =
submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
submod->add_return({add_ins});
auto add_ins1 =
submod->add_instruction(migraphx::make_op("add"), add_ins0, broadcast_lit);
submod->add_return({add_ins0, add_ins1});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
......@@ -57,14 +59,18 @@ struct test_select_module_add : verify_program<test_select_module_add>
auto input = mm->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module",
{{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
auto ret0 =
mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
auto ret1 =
mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), sm_ins);
mm->add_return({ret0, ret1});
return p;
}
......