Commit b3955af4 authored by Paul

Merge

parents 1af49c6f c0398ded
@@ -33,6 +33,16 @@ struct tf_parser
     instruction_ref add_broadcastable_binary_op(const std::string& op_name,
                                                 instruction_ref arg0,
                                                 instruction_ref arg1) const;
+
+    instruction_ref add_common_op(const std::string& op_name,
+                                  std::vector<instruction_ref> inputs) const;
+
+    template <class... Ts>
+    instruction_ref add_common_op(const std::string& op_name, Ts... xs) const
+    {
+        return add_common_op(op_name, {xs...});
+    }
+
     instruction_ref add_instruction(const operation& op,
                                     const std::vector<instruction_ref>& args) const;
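The templated add_common_op above is a small forwarding trick: the parameter pack is expanded into a braced-init-list, and since a template cannot deduce its arguments from a braced list, overload resolution can only pick the std::vector overload, so any number of arguments funnels into the one real implementation. A minimal self-contained sketch of the same pattern, with instruction_ref stood in by int purely for illustration:

#include <iostream>
#include <string>
#include <vector>

using instruction_ref = int; // illustrative stand-in, not the MIGraphX type

// The vector overload does the real work.
instruction_ref add_common_op(const std::string& op_name, std::vector<instruction_ref> inputs)
{
    std::cout << op_name << " with " << inputs.size() << " inputs\n";
    return inputs.empty() ? instruction_ref{} : inputs.front();
}

// The variadic overload packs its arguments into a braced-init-list; the
// braced list cannot match this template again, so the vector overload wins.
template <class... Ts>
instruction_ref add_common_op(const std::string& op_name, Ts... xs)
{
    return add_common_op(op_name, {xs...});
}

int main()
{
    add_common_op("clip", 1, 2, 3); // same as add_common_op("clip", {1, 2, 3})
}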
@@ -18,15 +18,10 @@ struct parse_relu6 : op_parser<parse_relu6>
                           const tf_parser::node_info& info,
                           std::vector<instruction_ref> args) const
     {
-        auto input_lens = args[0]->get_shape().lens();
-        auto min_val    = info.add_literal(0.0f);
-        auto max_val    = info.add_literal(6.0f);
+        auto min_val = info.add_literal(0.0f);
+        auto max_val = info.add_literal(6.0f);
-        min_val =
-            info.add_instruction(make_op("multibroadcast", {{"out_lens", input_lens}}), min_val);
-        max_val =
-            info.add_instruction(make_op("multibroadcast", {{"out_lens", input_lens}}), max_val);
-        return info.add_instruction(make_op("clip"), args.front(), min_val, max_val);
+        return info.add_common_op("clip", args[0], min_val, max_val);
     }
 };
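What add_common_op buys here: clip requires its three inputs to share a shape, and the scalar literals 0.0f and 6.0f must first be multibroadcast to the input's lens, which the old code did by hand. The broadcasting rule itself is the familiar NumPy-style one; the helper below is an illustrative reimplementation of that rule, not the MIGraphX function:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Illustrative NumPy-style broadcast: align shapes from the right; each dim
// pair must match or one must be 1; the result takes the larger dim.
std::vector<std::size_t> broadcast_lens(std::vector<std::size_t> a, std::vector<std::size_t> b)
{
    if(a.size() < b.size())
        std::swap(a, b);
    auto offset                     = a.size() - b.size();
    std::vector<std::size_t> result = a;
    for(std::size_t i = 0; i < b.size(); i++)
    {
        if(a[i + offset] != b[i] and a[i + offset] != 1 and b[i] != 1)
            throw std::runtime_error("incompatible shapes");
        result[i + offset] = std::max(a[i + offset], b[i]);
    }
    return result;
}

int main()
{
    // a scalar-like literal {1} against the relu6 input lens {1, 3, 13, 37}
    for(auto d : broadcast_lens({1, 3, 13, 37}, {1}))
        std::cout << d << ' ';
    std::cout << '\n'; // prints: 1 3 13 37
}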
@@ -79,7 +79,13 @@ instruction_ref tf_parser::node_info::add_broadcastable_binary_op(const std::str
                                                                   instruction_ref arg0,
                                                                   instruction_ref arg1) const
 {
-    return add_common_op(*mm, make_op(op_name), {arg0, arg1});
+    return this->add_common_op(op_name, arg0, arg1);
 }
 
+instruction_ref tf_parser::node_info::add_common_op(const std::string& op_name,
+                                                    std::vector<instruction_ref> inputs) const
+{
+    return migraphx::add_common_op(*mm, make_op(op_name), std::move(inputs));
+}
+
 int64_t tf_parser::parse_axis(const int64_t dim, const size_t num_dims) const
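A side note on the new overload's signature: inputs is taken by value and then moved into migraphx::add_common_op. This pass-by-value-then-move idiom lets callers choose copy or move at the call site while the function body pays for at most one move. A generic sketch of the same idiom:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// sink() is the layer that finally owns the data (migraphx::add_common_op
// in the hunk above); wrapper() mirrors node_info::add_common_op.
void sink(std::vector<std::string> v) { std::cout << v.size() << " elements moved in\n"; }

void wrapper(std::vector<std::string> inputs) { sink(std::move(inputs)); }

int main()
{
    std::vector<std::string> xs{"arg0", "arg1"};
    wrapper(std::move(xs)); // the buffer is handed down; no element copies
}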
@@ -6,6 +6,7 @@
 #include <migraphx/auto_contiguous.hpp>
 #include <migraphx/dead_code_elimination.hpp>
 #include <migraphx/eliminate_contiguous.hpp>
+#include <migraphx/replace_allocate.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/iterator_for.hpp>
 #include <migraphx/op/add.hpp>
@@ -20,12 +21,15 @@
 void run_lowering(migraphx::program& p, bool offload_copy = false)
 {
     auto ctx = migraphx::gpu::context{};
-    migraphx::run_passes(*p.get_main_module(),
-                         {migraphx::auto_contiguous{},
-                          migraphx::gpu::lowering{&ctx, offload_copy},
-                          migraphx::dead_code_elimination{},
-                          migraphx::eliminate_contiguous{"gpu::contiguous"},
-                          migraphx::dead_code_elimination{}});
+    migraphx::run_passes(
+        *p.get_main_module(),
+        {migraphx::auto_contiguous{},
+         migraphx::gpu::lowering{&ctx, offload_copy},
+         migraphx::dead_code_elimination{},
+         migraphx::eliminate_contiguous{"gpu::contiguous"},
+         migraphx::dead_code_elimination{},
+         migraphx::replace_allocate{migraphx::gpu::gpu_allocation_model{}, offload_copy},
+         migraphx::dead_code_elimination{}});
 }
 
 TEST_CASE(tanh_shape)
@@ -2,13 +2,14 @@
 #include <migraphx/gpu/context.hpp>
 #include <migraphx/gpu/lowering.hpp>
 #include <migraphx/gpu/target.hpp>
+#include <migraphx/gpu/allocation_model.hpp>
 #include <migraphx/apply_alpha_beta.hpp>
-#include <migraphx/adjust_allocation.hpp>
 #include <migraphx/gpu/pack_int8_args.hpp>
 #include <migraphx/gpu/rocblas.hpp>
 #include <migraphx/auto_contiguous.hpp>
 #include <migraphx/dead_code_elimination.hpp>
 #include <migraphx/eliminate_contiguous.hpp>
+#include <migraphx/replace_allocate.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/iterator_for.hpp>
 #include <migraphx/pass_manager.hpp>
@@ -22,6 +23,8 @@ void run_passes(migraphx::module& m)
                          {migraphx::auto_contiguous{},
                           migraphx::gpu::lowering{&ctx, false},
                           migraphx::dead_code_elimination{},
+                          migraphx::replace_allocate{migraphx::gpu::gpu_allocation_model{}},
+                          migraphx::dead_code_elimination{},
                           migraphx::gpu::pack_int8_args{},
                           migraphx::dead_code_elimination{}});
 }
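In both test pipelines the new replace_allocate pass is slotted in after lowering, which is what introduces the generic allocate placeholders, and each insertion is followed by another dead_code_elimination to sweep up anything the rewrite orphaned. A toy pipeline, purely for illustration (real MIGraphX passes operate on modules, not strings), that mirrors this ordering:

#include <functional>
#include <iostream>
#include <string>
#include <vector>

using toy_module = std::vector<std::string>;
using pass       = std::function<void(toy_module&)>;

int main()
{
    toy_module m{"gpu::add"};
    std::vector<pass> passes{
        // lowering (toy): introduces a generic allocation placeholder
        [](toy_module& ir) { ir.push_back("allocate"); },
        // replace_allocate (toy): rewrites placeholders for the target
        [](toy_module& ir) {
            for(auto& ins : ir)
                if(ins == "allocate")
                    ins = "hip::allocate";
        },
        // dead_code_elimination (toy): nothing to prune in this example
        [](toy_module&) {}};
    for(const auto& p : passes)
        p(m);
    for(const auto& ins : m)
        std::cout << ins << '\n'; // gpu::add, hip::allocate
}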
@@ -312,4 +312,18 @@ TEST_CASE(module_without_bypass)
     EXPECT(found);
 }
 
+TEST_CASE(multiple_module_dependency)
+{
+    // Test when an instruction from a submodule depends on a previous module
+    migraphx::program p;
+    auto* mm  = p.get_main_module();
+    auto* sub = p.create_module("sub");
+    auto l1   = mm->add_literal(migraphx::literal(3));
+    // a second, identical literal to make sure the instruction_ref is compared,
+    // rather than the instruction contents
+    sub->add_literal(migraphx::literal(3));
+    sub->add_instruction(sum_op{}, l1, l1);
+    EXPECT((sub->validate() == sub->end()));
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
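The comment in multiple_module_dependency is worth unpacking: two literals holding the same value compare equal as instructions, but validate() must treat them as distinct because instruction_ref is an iterator-like handle with its own identity. The same distinction in miniature with std::list:

#include <iostream>
#include <iterator>
#include <list>

int main()
{
    std::list<int> xs{3, 3}; // two "literals" with the same value
    auto a = xs.begin();
    auto b = std::next(a);
    std::cout << std::boolalpha
              << "values equal:    " << (*a == *b) << '\n' // true
              << "iterators equal: " << (a == b) << '\n';  // false
}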
#include <migraphx/allocation_model.hpp>
#include <migraphx/replace_allocate.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/register_op.hpp>
#include <basic_ops.hpp>
#include <test.hpp>

struct allocate_no_out : migraphx::auto_register_op<allocate_no_out>
{
    migraphx::shape s{};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::pack(f(self.s, "shape"));
    }

    std::string name() const { return "allocate_no_out"; }

    migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
    {
        migraphx::check_shapes{inputs, *this}.has(0);
        return s;
    }

    migraphx::argument compute(migraphx::context&,
                               const migraphx::shape& output_shape,
                               const std::vector<migraphx::argument>&) const
    {
        return {output_shape};
    }
};

struct allocate_with_out : migraphx::auto_register_op<allocate_with_out>
{
    migraphx::shape s{};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::pack(f(self.s, "shape"));
    }

    std::string name() const { return "allocate_with_out"; }

    migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
    {
        migraphx::check_shapes{inputs, *this}.has(0);
        return s;
    }

    migraphx::argument compute(migraphx::context&,
                               const migraphx::shape& output_shape,
                               const std::vector<migraphx::argument>&) const
    {
        return {output_shape};
    }
};

// allocation model that has no out params
struct allocation_no_out_model
{
    std::string name() const { return "allocate_no_out"; }

    migraphx::operation allocate(const migraphx::shape& s) const
    {
        return migraphx::make_op(name(), {{"shape", to_value(s)}});
    }

    migraphx::operation preallocate(const migraphx::shape&, const std::string&) const { return {}; }

    std::string copy() const { return {}; }

    bool needs_out_params() const { return false; }
};

// allocation model with out params
struct allocation_with_out_model
{
    std::string name() const { return "allocate_with_out"; }

    migraphx::operation allocate(const migraphx::shape& s) const
    {
        return migraphx::make_op(name(), {{"shape", to_value(s)}});
    }

    migraphx::operation preallocate(const migraphx::shape&, const std::string&) const { return {}; }

    std::string copy() const { return {}; }

    bool needs_out_params() const { return true; }
};

void run_pass(migraphx::module& m, migraphx::allocation_model model, bool offload_copy = false)
{
    migraphx::run_passes(m,
                         {migraphx::replace_allocate{std::move(model), offload_copy},
                          migraphx::dead_code_elimination{}});
}

void run_pass(migraphx::program& p, migraphx::allocation_model model, bool offload_copy = false)
{
    migraphx::run_passes(p,
                         {migraphx::replace_allocate{std::move(model), offload_copy},
                          migraphx::dead_code_elimination{}});
}

migraphx::module create_simple_program()
{
    migraphx::module m;
    migraphx::shape s{migraphx::shape::float_type, {5}};
    auto x = m.add_parameter("x", s);
    auto y = m.add_parameter("y", s);
    auto alloc =
        m.add_instruction(migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}}));
    m.add_instruction(pass_op{}, alloc, x, y);
    return m;
}

TEST_CASE(allocate_no_out)
{
    migraphx::module m = create_simple_program();
    run_pass(m, allocation_no_out_model{});
    EXPECT(std::any_of(m.begin(), m.end(), [](const migraphx::instruction& ins) {
        return migraphx::contains(ins.name(), "allocate_no_out");
    }));
}

TEST_CASE(allocate_with_out_param)
{
    migraphx::module m = create_simple_program();
    run_pass(m, allocation_with_out_model{});
    EXPECT(std::none_of(m.begin(), m.end(), [](const migraphx::instruction& ins) {
        return migraphx::contains(ins.name(), "allocate");
    }));
}

TEST_CASE(allocate_with_out_return)
{
    migraphx::module m = create_simple_program();
    m.add_return({std::prev(m.end())});
    run_pass(m, allocation_with_out_model{});
    EXPECT(std::none_of(m.begin(), m.end(), [](const migraphx::instruction& ins) {
        return migraphx::contains(ins.name(), "allocate");
    }));
}

TEST_CASE(allocate_with_out_no_params)
{
    migraphx::module m;
    migraphx::shape s{migraphx::shape::float_type, {5}};
    auto x = m.add_parameter("x", s);
    auto y = m.add_parameter("y", s);
    auto z = m.add_parameter("z", s);
    auto alloc =
        m.add_instruction(migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}}));
    auto pass1 = m.add_instruction(pass_op{}, alloc, x, y);
    auto alloc2 =
        m.add_instruction(migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}}));
    m.add_instruction(pass_op{}, alloc2, z, pass1);
    run_pass(m, allocation_with_out_model{});
    EXPECT(std::any_of(m.begin(), m.end(), [](const migraphx::instruction& ins) {
        return migraphx::contains(ins.name(), "allocate_with_out");
    }));
}

TEST_CASE(if_allocate)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape cond_s{migraphx::shape::bool_type};
    auto cond = mm->add_parameter("cond", cond_s);
    migraphx::shape s{migraphx::shape::float_type, {5}};
    auto x = mm->add_parameter("x", s);
    auto y = mm->add_parameter("y", s);

    auto* then_mod = p.create_module("If_0_if");
    auto alloc     = then_mod->add_instruction(
        migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}}));
    auto a1 = then_mod->add_instruction(pass_op{}, alloc, x);
    then_mod->add_return({a1});

    auto* else_mod = p.create_module("If_0_else");
    auto alloc1    = else_mod->add_instruction(
        migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}}));
    auto a2 = else_mod->add_instruction(pass_op{}, alloc1, y);
    else_mod->add_return({a2});

    mm->add_instruction(migraphx::make_op("if"), {cond}, {then_mod, else_mod});
    run_pass(p, allocation_with_out_model{});
    EXPECT(std::any_of(mm->begin(), mm->end(), [](const migraphx::instruction& ins) {
        return migraphx::contains(ins.name(), "allocate_with_out");
    }));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
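Taken together, these tests pin down the two strategies the pass supports. When needs_out_params() is false, every generic allocate is rewritten into the model's allocation op. When it is true, the allocation backing the module's final result is hoisted into an output parameter supplied by the caller, and only the intermediate allocations (as in allocate_with_out_no_params) are rewritten to the model op. A toy rendering of that split, with all MIGraphX details stripped away; the assumption that the last allocate backs the result stands in for the real pass's data-flow analysis:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Toy module: a flat list of op names.
std::vector<std::string> toy_replace_allocate(std::vector<std::string> m,
                                              const std::string& model_op,
                                              bool needs_out_params)
{
    std::size_t last = m.size();
    for(std::size_t i = 0; i < m.size(); i++)
        if(m[i] == "allocate")
            last = i;
    for(std::size_t i = 0; i < m.size(); i++)
        if(m[i] == "allocate")
            m[i] = (needs_out_params and i == last) ? "@param:output" : model_op;
    return m;
}

int main()
{
    for(const auto& ins : toy_replace_allocate(
            {"allocate", "pass_op", "allocate", "pass_op"}, "allocate_with_out", true))
        std::cout << ins << '\n'; // allocate_with_out, pass_op, @param:output, pass_op
}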
@@ -471,6 +471,15 @@ def relu6_test(g1):
         tf.nn.relu6(g1_input, 'relu6')
 
 
+@tf_test
+def relu6_mismatch_test(g1):
+    with g1.as_default():
+        g1_input = tf.compat.v1.placeholder(tf.float16,
+                                            shape=(1, 3, 13, 37),
+                                            name='0')
+        tf.nn.relu6(g1_input, 'relu6')
+
+
 @tf_test
 def reshape_test(g1):
     with g1.as_default():

@@ -676,6 +685,7 @@ if __name__ == '__main__':
     pow_test()
     relu_test()
     relu6_test()
+    relu6_mismatch_test()
     reshape_test()
     rsqrt_test()
     shape_test()
(binary file: relu6_mismatch_test.pb, a serialized TensorFlow GraphDef containing a float16 Placeholder named '0' feeding a Relu6 node)
@@ -706,6 +706,31 @@ TEST_CASE(relu6_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(relu6_mismatch_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    std::vector<size_t> input_lens{1, 3, 13, 37};
+    auto l0      = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, input_lens});
+    auto min_val = mm->add_literal(0.0f);
+    auto max_val = mm->add_literal(6.0f);
+    auto l0_convert = mm->add_instruction(
+        migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), l0);
+    min_val = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
+                                  min_val);
+    max_val = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
+                                  max_val);
+    mm->add_instruction(migraphx::make_op("clip"), l0_convert, min_val, max_val);
+
+    auto prog = optimize_tf("relu6_mismatch_test.pb", false);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(reshape_test)
 {
     migraphx::program p;
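The mismatch test documents the type side of add_common_op: the float16 input is converted to float32 because the literals participate as float32, and mixed inputs are resolved to one common computation type before the clip is added. A toy ranking that reproduces the behavior the expected IR shows; the enum and the widest-type rule here are illustrative, not MIGraphX's internals:

#include <algorithm>
#include <cassert>
#include <initializer_list>

enum class dtype { half, float32, double64 }; // ordered narrow to wide

dtype common_type(std::initializer_list<dtype> ts)
{
    // pick the widest participating type
    return *std::max_element(ts.begin(), ts.end());
}

int main()
{
    // half input combined with float32 literals -> compute in float32
    assert(common_type({dtype::half, dtype::float32, dtype::float32}) == dtype::float32);
}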
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/op/quant_convolution.hpp>
struct quant_conv_int8x4_default : verify_program<quant_conv_int8x4_default>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape a_shape{migraphx::shape::int8_type, {16, 16, 4, 4}};
auto pa = mm->add_parameter("a", a_shape);
migraphx::shape c_shape{migraphx::shape::int8_type, {16, 16, 3, 3}};
auto pc = mm->add_parameter("c", c_shape);
mm->add_instruction(
migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
pa,
pc);
return p;
}
};
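This verify test builds a quant_convolution with padding_mode same and zero explicit padding, so it exercises the path where padding is derived from the shapes. The arithmetic behind TF-style SAME padding is sketched below as the standard formula; whether MIGraphX computes it exactly this way is not shown in this diff:

#include <cstdint>
#include <iostream>

// SAME padding: choose total padding so that output = ceil(input / stride).
int64_t same_total_pad(int64_t input, int64_t kernel, int64_t stride, int64_t dilation = 1)
{
    int64_t effective = dilation * (kernel - 1) + 1;       // dilated kernel extent
    int64_t output    = (input + stride - 1) / stride;     // ceil(input / stride)
    int64_t pad       = (output - 1) * stride + effective - input;
    return pad > 0 ? pad : 0;
}

int main()
{
    // 4x4 input, 3x3 kernel, stride 1 -> total pad 2 (1 on each side)
    std::cout << same_total_pad(4, 3, 1) << '\n';
}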
@@ -28,6 +28,8 @@ struct allocation_model
     operation allocate(const shape& s) const;
     /// Create a preallocated operator for the given shape
     operation preallocate(const shape& s, const std::string& id) const;
+    /// Check whether output parameters need to be inserted
+    bool needs_out_params() const;
 };
 
 #else

@@ -37,7 +39,8 @@ interface('allocation_model',
          virtual('name', returns='std::string', const=True),
          virtual('copy', returns='std::string', const=True),
          virtual('allocate', s='const shape&', returns='operation', const=True),
-         virtual('preallocate', s='const shape&', id='std::string', returns='operation', const=True)
+         virtual('preallocate', s='const shape&', id='std::string', returns='operation', const=True),
+         virtual('needs_out_params', returns='bool', const=True)
          )
%>
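The second hunk is the te.py template that generates the type-erased allocation_model; adding virtual('needs_out_params', ...) regenerates the hidden vtable so that any concrete model (like the two test models earlier) keeps working by duck typing. A hand-written miniature of what such an interface() roughly expands to, reduced to two members for brevity:

#include <memory>
#include <string>
#include <utility>

struct allocation_model
{
    // any object with name() and needs_out_params() can be stored
    template <class T>
    allocation_model(T x) : self(std::make_shared<model<T>>(std::move(x)))
    {
    }

    std::string name() const { return self->name(); }
    bool needs_out_params() const { return self->needs_out_params(); }

    private:
    struct concept_t
    {
        virtual ~concept_t()                  = default;
        virtual std::string name() const      = 0;
        virtual bool needs_out_params() const = 0;
    };

    template <class T>
    struct model : concept_t
    {
        model(T x) : data(std::move(x)) {}
        std::string name() const override { return data.name(); }
        bool needs_out_params() const override { return data.needs_out_params(); }
        T data;
    };

    std::shared_ptr<const concept_t> self;
};

struct dummy_model // stands in for allocation_no_out_model above
{
    std::string name() const { return "allocate_no_out"; }
    bool needs_out_params() const { return false; }
};

int main() { return allocation_model{dummy_model{}}.needs_out_params() ? 1 : 0; }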