Commit 9686cb33 authored by charlie

Merge branch 'select_module_op' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_batch_pass

parents 84725d72 dd74a89a
......@@ -54,6 +54,10 @@ using ins_dep_map = std::unordered_map<instruction_ref, std::unordered_set<ins
*/
struct module
{
+   // used by replace_allocate pass
+   // allocate memory in this module rather than using output parameters
+   bool use_local_alloc = false;
module(const std::string& name = "");
// move constructor
......
......@@ -26,8 +26,6 @@
#include <migraphx/check_shapes.hpp>
#include <migraphx/module.hpp>
#include <migraphx/dyn_output.hpp>
-#include <set>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
......@@ -35,98 +33,59 @@ namespace op {
struct select_module
{
    // output shape of the dynamic model
-   shape output_dyn_shape;
-   int input_batch_index  = -1;
-   int output_batch_index = -1;
-   std::string dyn_batch_param_name;
+   shape output_dyn_shapes;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
-       return pack(f(self.output_dyn_shape, "output_dyn_shape"),
-                   f(self.input_batch_index, "input_batch_index"),
-                   f(self.output_batch_index, "output_batch_index"),
-                   f(self.dyn_batch_param_name, "dyn_batch_param_name"));
+       return pack(f(self.output_dyn_shapes, "output_dyn_shapes"));
    }

    std::string name() const { return "select_module"; }

-   // runs once during model compilation with dynamic shape input
-   // may run on each model evaluation with static shape input
-   shape compute_shape(std::vector<shape> inputs) const
-   {
-       check_shapes{inputs, *this, true}.has(1);
-       auto s0 = inputs.at(0);
-       if(s0.dynamic())
-       {
-           // should we check that the submodules have the same parameters here?
-           // check that no more than one parameter is non-fixed?
-           // would need to use version of compute_shape with the parameter list
-           return shape{output_dyn_shape};
-       }
-       else
-       {
-           auto batch_size = s0.lens().at(input_batch_index);
-           auto dds        = output_dyn_shape.dyn_dims();
-           dds.at(output_batch_index) = {batch_size, batch_size};
-           std::vector<std::size_t> dims;
-           if(std::all_of(dds.begin(), dds.end(), [](auto dd) { return dd.is_fixed(); }))
-           {
-               std::transform(
-                   dds.begin(), dds.end(), std::back_inserter(dims), [](auto d) { return d.max; });
-               return {output_dyn_shape.type(), dims};
-           }
-           else
-           {
-               MIGRAPHX_THROW("SELECT_MODULE: more than one input dimension was non-fixed");
-           }
-       }
-   }
+   shape compute_shape(const std::vector<shape>&, std::vector<module_ref>) const
+   {
+       return shape{output_dyn_shapes};
+   }

-   argument compute(const dyn_output& dyn_out,
+   argument compute(const shape&,
                     const std::vector<argument>& args,
                     const std::vector<module_ref>& submodule_list,
                     const std::function<std::vector<argument>(
                         module_ref&, const std::unordered_map<std::string, argument>&)>& run) const
    {
-       std::vector<module_ref> modules_to_run;
-       for(const auto& mod : submodule_list)
-       {
-           // find submodule with the same parameter shape as the input data
-           auto p_shape = mod->get_parameter_shape(dyn_batch_param_name);
-           if(p_shape == args.at(0).get_shape())
-           {
-               modules_to_run.push_back(mod);
-               break;
-           }
-       }
-       if(modules_to_run.empty())
-       {
-           MIGRAPHX_THROW("SELECT_MODULE: no compatible submodules found for input shape: " +
-                          migraphx::to_string(args.at(0).get_shape()));
-       }
-       std::set<std::string> pnames;
-       for(const auto& mod : modules_to_run)
-       {
-           // TODO If all the modules have the same parameters, this would only need to run once
-           auto names = mod->get_parameter_names();
-           pnames.insert(names.begin(), names.end());
-       }
-       assert(pnames.size() <= args.size());
+       // TODO if an exact match is not found, assemble module list from binary base
+       // find submodule with input parameter shapes exactly the same as the input arguments,
+       // assuming arguments are in the same order as the input parameters
+       auto module_iter =
+           std::find_if(submodule_list.cbegin(), submodule_list.cend(), [&](module_ref mr) {
+               auto param_names = mr->get_parameter_names();
+               assert(param_names.size() <= args.size());
+               return std::equal(param_names.cbegin(),
+                                 param_names.cend(),
+                                 args.cbegin(),
+                                 [&](auto p_name, auto a) {
+                                     return a.get_shape() == mr->get_parameter_shape(p_name);
+                                 });
+           });
+       if(module_iter == submodule_list.end())
+       {
+           MIGRAPHX_THROW("SELECT_MODULE: no compatible submodules found for given input shapes");
+       }
+       auto module_to_run = *module_iter;
        std::unordered_map<std::string, argument> params;
-       std::transform(pnames.begin(),
-                      pnames.end(),
-                      args.begin(),
-                      std::inserter(params, params.end()),
-                      [](auto&& name, auto&& arg) { return std::make_pair(name, arg); });
-       auto results = run(modules_to_run.at(0), params);
-       return results.at(0);
+       // add input parameters
+       auto param_names = module_to_run->get_parameter_names();
+       assert(param_names.size() <= args.size());
+       std::transform(param_names.begin(),
+                      param_names.end(),
+                      args.begin(),
+                      std::inserter(params, params.end()),
+                      [](auto&& name, auto&& a) { return std::make_pair(name, a); });
+       // TODO run multiple modules and split the parameter data to each batch size
+       auto results = run(module_to_run, params);
+       return argument{results};
    }
};
......
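The new exact-match selection can be hard to read interleaved with the old batch-index logic, so here is a minimal self-contained sketch of the same policy using hypothetical stand-in types (toy_shape and toy_module are illustrative, not MIGraphX API): a submodule is selected only when every one of its parameter shapes equals the corresponding argument shape, in order.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

struct toy_shape
{
    std::vector<std::size_t> lens;
    bool operator==(const toy_shape& other) const { return lens == other.lens; }
};

struct toy_module
{
    // parameter name -> shape, in parameter order
    std::vector<std::pair<std::string, toy_shape>> params;
};

// First module whose parameter shapes exactly match the argument shapes wins,
// mirroring the find_if/std::equal predicate in select_module::compute.
const toy_module* select(const std::vector<toy_module>& mods,
                         const std::vector<toy_shape>& args)
{
    auto it = std::find_if(mods.begin(), mods.end(), [&](const toy_module& m) {
        if(m.params.size() > args.size())
            return false;
        return std::equal(m.params.begin(),
                          m.params.end(),
                          args.begin(),
                          [](const auto& p, const toy_shape& a) { return p.second == a; });
    });
    return it == mods.end() ? nullptr : &*it;
}

int main()
{
    std::vector<toy_module> mods(2);
    mods[0].params = {{"data", toy_shape{{1, 4}}}};
    mods[1].params = {{"data", toy_shape{{2, 4}}}};
    assert(select(mods, {toy_shape{{2, 4}}}) == &mods[1]); // batch-2 submodule chosen
    assert(select(mods, {toy_shape{{8, 4}}}) == nullptr);  // no exact match found
}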
......@@ -140,9 +140,9 @@ template <class T>
auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
-> decltype(x.normalize_compute_shape(inputs))
{
-   dependent_type<operation, T> y = x;
    if(inputs.empty())
        MIGRAPHX_THROW("At least one input is required for " + x.name());
+   dependent_type<operation, T> y = x;
normalize_attributes(y, inputs[0].max_lens());
return any_cast<T>(y).normalize_compute_shape(inputs);
}
......@@ -168,7 +168,7 @@ shape compute_shape_op(const T& x, const std::vector<shape>& inputs)
}
template <class T>
-auto mod_compute_shape_op(rank<2>,
+auto mod_compute_shape_op(rank<1>,
const T& x,
const std::vector<shape>& inputs,
const std::vector<module_ref>& mod_args)
......@@ -177,15 +177,6 @@ auto mod_compute_shape_op(rank<2>,
return x.compute_shape(inputs, mod_args);
}
-template <class T>
-auto mod_compute_shape_op(rank<1>,
-                          const T& x,
-                          const std::vector<shape>& inputs,
-                          const std::vector<module_ref>&) -> decltype(x.compute_shape(inputs))
-{
-    return x.compute_shape(inputs);
-}
template <class T>
shape mod_compute_shape_op(rank<0>,
const T& x,
......
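For readers unfamiliar with the rank<N> overload-ranking idiom these templates rely on, here is a hedged standalone sketch (toy names, not the MIGraphX dispatcher). rank<N> derives from rank<N-1>, so the highest-ranked viable overload wins, and the trailing decltype removes an overload via SFINAE when its call expression is ill-formed; deleting the inputs-only overload above is what lets the module-args version drop from rank<2> to rank<1>.

#include <iostream>
#include <vector>

template <int N>
struct rank : rank<N - 1>
{
};
template <>
struct rank<0>
{
};

struct with_mod
{
    int compute_shape(int x, const std::vector<int>&) const { return x + 1; }
};
struct plain
{
    int compute_shape(int x) const { return x * 2; }
};

// preferred overload; SFINAEs away when T has no two-argument compute_shape
template <class T>
auto dispatch(rank<1>, const T& op, int x, const std::vector<int>& mods)
    -> decltype(op.compute_shape(x, mods))
{
    return op.compute_shape(x, mods);
}

// fallback, reached through the rank<1> -> rank<0> base conversion
template <class T>
int dispatch(rank<0>, const T& op, int x, const std::vector<int>&)
{
    return op.compute_shape(x);
}

int main()
{
    std::cout << dispatch(rank<1>{}, with_mod{}, 1, {}) << "\n"; // prints 2 via rank<1>
    std::cout << dispatch(rank<1>{}, plain{}, 2, {}) << "\n";    // prints 4 via rank<0>
}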
......@@ -243,6 +243,9 @@ struct shape
/// Return true if the shape is dynamic
bool dynamic() const;
+   /// Return true if this shape or any of the sub_shapes are dynamic
+   bool any_of_dynamic() const;
shape normalize_standard() const;
shape with_lens(type_t t, const std::vector<std::size_t>& l) const;
......
......@@ -379,7 +379,7 @@ std::vector<argument> generic_eval(const module* mod,
}));
}
assert(results.find(ins) != results.end());
-       if(not ins->get_shape().dynamic())
+       if(not ins->get_shape().any_of_dynamic())
{
assert(results.at(ins).get_shape() == ins->get_shape());
}
......
......@@ -104,19 +104,17 @@ void replace_allocate::apply(module& m) const
continue;
auto s = ins->get_shape();
-       if(not main_offload_copy and model.needs_out_params() and contains(mod_output_names, ins))
+       if(not main_offload_copy and not(m.use_local_alloc) and model.needs_out_params() and
+          contains(mod_output_names, ins))
        {
            auto out_param = m.add_parameter(mod_output_names[ins], s);
            m.replace_instruction(ins, out_param);
-           continue;
        }
-       m.replace_instruction(
-           ins,
-           m.insert_instruction(ins,
-                                make_op(model.name(), migraphx::value{{"shape", to_value(s)}})));
+       else
+       {
+           m.replace_instruction(ins,
+                                 make_op(model.name(), migraphx::value{{"shape", to_value(s)}}));
+       }
}
}
......
......@@ -483,6 +483,17 @@ std::string shape::type_string() const { return name(this->type()); }
bool shape::dynamic() const { return not impl->m_dyn_dims.empty(); }
+bool shape::any_of_dynamic() const
+{
+    if(this->dynamic())
+    {
+        return true;
+    }
+    return std::any_of(this->sub_shapes().cbegin(), this->sub_shapes().cend(), [](auto s) {
+        return s.any_of_dynamic();
+    });
+}
const std::vector<shape::dynamic_dimension>& shape::dyn_dims() const { return impl->m_dyn_dims; }
std::vector<std::size_t> shape::min_lens() const
......
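The new tests later in this commit only exercise one level of nesting, but because any_of_dynamic recurses through sub_shapes, dynamism buried deeper inside tuple shapes is found as well. A small sketch, assuming nested tuple shapes are constructible the same way, with illustrative shape values and an assumed header path:

#include <cassert>
#include <vector>
#include <migraphx/shape.hpp>

void nested_any_of_dynamic_sketch()
{
    migraphx::shape dyn{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
    std::vector<migraphx::shape> inner_subs = {dyn};
    migraphx::shape inner{inner_subs}; // tuple shape holding a dynamic sub-shape
    std::vector<migraphx::shape> outer_subs = {
        inner, migraphx::shape{migraphx::shape::float_type, {3, 4, 5}}};
    migraphx::shape outer{outer_subs};
    assert(outer.any_of_dynamic()); // true: the dynamic shape sits two levels down
}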
......@@ -111,6 +111,7 @@ struct miopen_apply
add_loop_op();
add_neg_op();
add_nms_op();
+       add_select_module_op();
}
void copy_params() const
......@@ -358,6 +359,33 @@ struct miopen_apply
return mod->replace_instruction(ins, gpu_out);
});
}
+    /**
+     * Turns on use_local_alloc in the select_module submodules.
+     * Routes each submodule return value through a hip::sync_stream.
+     */
+    void add_select_module_op()
+    {
+        apply_map.emplace("select_module", [=](instruction_ref ins) {
+            auto mod_args = ins->module_inputs();
+            for(auto smod : mod_args)
+            {
+                smod->use_local_alloc = true;
+                auto last_ins = std::prev(smod->end());
+                if(last_ins->name() == "@return")
+                {
+                    // collect every synced output first, then rewrite the return
+                    // once so multi-output submodules keep all of their results
+                    std::vector<instruction_ref> sync_outs;
+                    for(auto out_ins : last_ins->inputs())
+                    {
+                        sync_outs.push_back(smod->insert_instruction(
+                            last_ins, make_op("hip::sync_stream"), out_ins));
+                    }
+                    smod->replace_return(sync_outs);
+                }
+            }
+            return ins;
+        });
+    }
};
void lowering::apply(module& m) const { miopen_apply{&m, this}.apply(); }
......
......@@ -2364,25 +2364,15 @@ TEST_CASE(rnn)
TEST_CASE(select_module_dyn)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {3, 3}, {255, 255}, {255, 255}}};
-   migraphx::shape out_attr = migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}};
+   std::vector<migraphx::shape> sub_shapes = {};
+   sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}});
+   migraphx::shape out_attr = migraphx::shape{sub_shapes};
    expect_shape(
-       migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}},
-       migraphx::make_op("select_module", {{"output_dyn_shape", migraphx::to_value(out_attr)}}),
+       out_attr,
+       migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
input);
}
-TEST_CASE(select_module_static)
-{
-    migraphx::shape input{migraphx::shape::float_type, {3, 3, 255, 255}};
-    migraphx::shape out_attr = migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}};
-    expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 1000}},
-                 migraphx::make_op("select_module",
-                                   {{"output_dyn_shape", migraphx::to_value(out_attr)},
-                                    {"output_batch_index", 0},
-                                    {"input_batch_index", 0}}),
-                 input);
-}
TEST_CASE(slice_shape)
{
migraphx::shape input{migraphx::shape::int32_type, {2, 2, 3}};
......
......@@ -238,6 +238,30 @@ TEST_CASE(test_shape_dynamic_serialize)
EXPECT(s3 != s4);
}
+TEST_CASE(any_of_dynamic_true)
+{
+    std::vector<migraphx::shape> sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s0{sub_shapes};
+    EXPECT(s0.any_of_dynamic());
+
+    sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 1}, {4, 4}}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s1{sub_shapes};
+    EXPECT(s1.any_of_dynamic());
+}
+
+TEST_CASE(any_of_dynamic_false)
+{
+    std::vector<migraphx::shape> sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {1, 4}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s{sub_shapes};
+    EXPECT(not s.any_of_dynamic());
+}
TEST_CASE(test_shape_packed)
{
migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}};
......
......@@ -185,7 +185,16 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
migraphx::parameter_map m;
for(auto&& x : p.get_parameter_shapes())
{
-       m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first));
+       if(x.second.dynamic())
+       {
+           // create static shape using maximum dimensions
+           migraphx::shape static_shape{x.second.type(), x.second.max_lens()};
+           m[x.first] = migraphx::generate_argument(static_shape, get_hash(x.first));
+       }
+       else
+       {
+           m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first));
+       }
}
auto gold_f = detach_async([=] { return run_ref(p, m); });
......
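The same max-dimension trick, factored into a standalone helper for clarity. This is a hedged sketch reusing only calls that appear above (generate_argument, type(), max_lens()); the header paths and the helper name max_dims_argument are assumptions for illustration.

#include <migraphx/generate.hpp>
#include <migraphx/shape.hpp>

// Materialize a dynamic shape at its maximum dimensions, then generate
// deterministic test data for it; static shapes pass through unchanged.
migraphx::argument max_dims_argument(const migraphx::shape& s, std::size_t seed)
{
    if(not s.dynamic())
        return migraphx::generate_argument(s, seed);
    migraphx::shape static_shape{s.type(), s.max_lens()};
    return migraphx::generate_argument(static_shape, seed);
}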
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_select_module_add : verify_program<test_select_module_add>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
auto literal_ins = mm->add_literal(migraphx::literal{lit_s, {6}});
// create batch submodules
auto create_submodule = [&](std::size_t batch_size, std::string module_name) {
auto* submod = p.create_module(module_name);
migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
auto sm_input = submod->add_parameter("data", sm_shape);
auto broadcast_lit =
submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
auto add_ins =
submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
submod->add_return({add_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
auto input = mm->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module",
{{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
return p;
}
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_select_module_reduce : verify_program<test_select_module_reduce>
{
migraphx::program create_program() const
{
migraphx::program p;
// create batch submodules
auto create_submodule = [&](std::size_t batch_size, std::string module_name) {
auto submod = p.create_module(module_name);
migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 2, 2}};
auto sm_input = submod->add_parameter("data", sm_shape);
auto reduce_ins =
submod->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
auto squeeze_ins =
submod->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), reduce_ins);
submod->add_return({squeeze_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
auto input = mm->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module",
{{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
return p;
}
};