Commit 623634ed authored by charlie

Merge branch 'dyn_conv' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_nms

parents 271497af 604e4493

@@ -48,9 +48,11 @@ void dead_code_elimination::apply(module& m) const
         // Skip the last instruction
         if(i == last)
             break;
-        // Skip instruction with empty shape as output unless its a builtin or undefined or identity
+        // Skip instruction with empty shape as output unless its [dynamic, builtin, undefined,
+        // identity, allocate]
         if((not i->get_shape().dynamic() and i->get_shape().elements() == 0) and
-           i->name().front() != '@' and not contains({"undefined", "identity", "allocate"}, i->name()))
+           i->name().front() != '@' and
+           not contains({"undefined", "identity", "allocate"}, i->name()))
             continue;
         assert(std::distance(m.begin(), i) <= std::distance(m.begin(), last));
         std::unordered_set<instruction_ref> visited;
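
Context for the hunk above: a dynamic shape has no fixed element count at compile time, so `elements() == 0` no longer implies a dead instruction. Below is a minimal standalone restatement of the new skip predicate; `can_skip` is a hypothetical helper name (the pass inlines this logic in its loop), everything else comes from the hunk itself.

#include <migraphx/instruction.hpp>
#include <migraphx/ranges.hpp> // migraphx::contains

// Illustrative restatement of the new guard (hypothetical free function).
bool can_skip(migraphx::instruction_ref i)
{
    // Never treat a dynamic-shape output as empty: its element count is
    // only known at run time.
    if(i->get_shape().dynamic())
        return false;
    // Builtins (names starting with '@') and the listed ops are exempt
    // even when their static output shape has zero elements.
    return i->get_shape().elements() == 0 and i->name().front() != '@' and
           not migraphx::contains({"undefined", "identity", "allocate"}, i->name());
}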
@@ -42,7 +42,7 @@ struct select_dependent_type
 template <class T, class... Ts>
 using dependent_type = typename select_dependent_type<T, Ts...>::type;
 
-bool normalize_attributes(operation& op, const shape& s);
+bool normalize_attributes(operation& op, const std::vector<std::size_t>& lens);
 
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
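
This signature change is the heart of the merge: `normalize_attributes` now takes the dimension vector directly instead of a `shape`, so each call site chooses which lengths to normalize against. The migration pattern, as the later hunks show (a sketch; `op` and `s` are assumed to be in scope):

// Before: dimensions were taken from the shape inside the function.
//     normalize_attributes(op, s);
// After: the caller passes lengths explicitly; for a dynamic shape the
// per-dimension upper bounds are used.
normalize_attributes(op, s.max_lens());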
@@ -140,7 +140,7 @@ auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
     -> decltype(x.normalize_compute_shape(inputs))
 {
     dependent_type<operation, T> y = x;
-    normalize_attributes(y, inputs[0]);
+    normalize_attributes(y, inputs[0].max_lens());
     return any_cast<T>(y).normalize_compute_shape(inputs);
 }
@@ -40,6 +40,7 @@ static void update_op(const instruction_ref& input, const instruction_ref& ins,
     auto val        = op.to_value();
     auto op_padding = val.at("padding").to_vector<size_t>();
+    // skip if shape is dynamic
     if(input->get_shape().dynamic())
     {
         return;
@@ -446,7 +446,7 @@ operation instruction::normalized_operator() const
     if(this->need_normalization())
     {
         auto s = this->inputs().front()->get_shape();
-        if(!normalize_attributes(o, s))
+        if(!normalize_attributes(o, s.max_lens()))
             return this->get_operator();
     }
     return o;
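
A note on why passing `s.max_lens()` here is safe for existing graphs: the assumption (based on how these hunks use the `shape` API, not confirmed by the diff itself) is that for a static shape `max_lens()` coincides with `lens()`, so static-shape normalization is unchanged while dynamic shapes gain a usable upper bound. A minimal sketch under that assumption:

#include <cassert>
#include <migraphx/shape.hpp>

// Assumption: for a static shape, max_lens() and lens() agree, so the
// switch from lens() to max_lens() is behavior-preserving in the static
// case and only adds coverage for dynamic shapes.
void check_static_equivalence()
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3, 4}};
    assert(s.max_lens() == s.lens()); // static case: identical vectors
}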
@@ -150,22 +150,21 @@ auto tune_pad_attribute(const value& val)
     return result;
 }
 
-bool normalize_attributes(operation& op, const shape& s)
+bool normalize_attributes(operation& op, const std::vector<std::size_t>& lens)
 {
     bool tuned = false;
     auto attrs = op.attributes();
     auto val   = op.to_value();
     if(attrs.contains("normalize_padding"))
     {
-        auto num_dims      = s.max_lens().size();
         auto padding       = val.at(attrs.at("normalize_padding").to<std::string>());
         auto padding_size  = padding.size();
         // for now, assume the dimensions to pad start at dim 2
         auto padding_start = 2;
-        if(padding_size == 2 * (num_dims - padding_start))
+        if(padding_size == 2 * (lens.size() - padding_start))
             tuned = true;
-        else if(padding_size != (num_dims - padding_start))
+        else if(padding_size != (lens.size() - padding_start))
             MIGRAPHX_THROW("inconsistent padding size");
         else
         {
@@ -195,7 +194,7 @@ bool normalize_attributes(operation& op, const shape& s)
                 axes = val.at("axes").without_key().to_vector<int64_t>();
             }
             auto vec    = vv.to_vector<int64_t>();
-            auto result = tune_attribute(vec, axes, rv.without_key(), s.lens());
+            auto result = tune_attribute(vec, axes, rv.without_key(), lens);
             val[key]    = result;
             op.from_value(val);
             val = op.to_value();
@@ -204,7 +203,7 @@ bool normalize_attributes(operation& op, const shape& s)
             else
             {
                 auto num    = vv.to<int64_t>();
-                auto result = tune_attribute({num}, {num}, rv.without_key(), s.lens());
+                auto result = tune_attribute({num}, {num}, rv.without_key(), lens);
                 val[key]    = result.front();
                 op.from_value(val);
                 val = op.to_value();
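
The padding check in `normalize_attributes` accepts two encodings relative to the number of spatial dimensions (everything from `padding_start = 2` onward): `2 * (lens.size() - 2)` values are explicit pre/post padding accepted as-is, `lens.size() - 2` values are symmetric per-dimension padding expanded in the elided else branch, and any other count throws. A worked example for an assumed NCHW input:

#include <cstddef>
#include <vector>

// Assumed 4-D NCHW lengths; spatial dims start at index 2 (H, W).
std::vector<std::size_t> lens = {1, 3, 32, 32};
std::size_t padding_start = 2;
std::size_t spatial = lens.size() - padding_start; // 2 spatial dims

// size == 2 * spatial (4): explicit {pre..., post...} form, accepted as-is
std::vector<std::size_t> explicit_padding = {1, 1, 1, 1};
// size == spatial (2): symmetric {pad_h, pad_w}, expanded by the else branch
std::vector<std::size_t> symmetric = {1, 1};
// any other size -> MIGRAPHX_THROW("inconsistent padding size")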
@@ -45,7 +45,7 @@ void normalize_ops::apply(module& m) const
         auto s                       = inputs[0]->get_shape();
         migraphx::operation tuned_op = ins->get_operator();
-        if(normalize_attributes(tuned_op, s))
+        if(normalize_attributes(tuned_op, s.max_lens()))
         {
             m.replace_instruction(ins, tuned_op, inputs);
             ins->set_normalized();
@@ -139,7 +139,7 @@ auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
     -> decltype(x.normalize_compute_shape(inputs))
 {
     dependent_type<operation, T> y = x;
-    normalize_attributes(y, inputs[0]);
+    normalize_attributes(y, inputs[0].max_lens());
     return any_cast<T>(y).normalize_compute_shape(inputs);
 }