Commit ad774473 authored by Shucai Xiao's avatar Shucai Xiao
Browse files

add test cases for the adjust_allocation pass

parent 7e7a8a61
...@@ -593,11 +593,7 @@ struct cpu_unary ...@@ -593,11 +593,7 @@ struct cpu_unary
{ {
Op op; Op op;
std::string name() const { return op.name(); } std::string name() const { return op.name(); }
shape compute_shape(const std::vector<shape>& inputs) const shape compute_shape(const std::vector<shape>& inputs) const { return inputs.front(); }
{
return {inputs.front().type(), inputs.front().lens()};
}
argument compute(context&, const shape& output_shape, std::vector<argument> args) const argument compute(context&, const shape& output_shape, std::vector<argument> args) const
{ {
argument result{output_shape}; argument result{output_shape};
...@@ -777,11 +773,7 @@ struct cpu_binary ...@@ -777,11 +773,7 @@ struct cpu_binary
{ {
Op op; Op op;
std::string name() const { return op.name(); } std::string name() const { return op.name(); }
shape compute_shape(const std::vector<shape>& inputs) const shape compute_shape(const std::vector<shape>& inputs) const { return inputs.front(); }
{
return {inputs.front().type(), inputs.front().lens()};
}
argument compute(context&, const shape& output_shape, std::vector<argument> args) const argument compute(context&, const shape& output_shape, std::vector<argument> args) const
{ {
argument result{output_shape}; argument result{output_shape};
......
...@@ -8,7 +8,7 @@ namespace gpu { ...@@ -8,7 +8,7 @@ namespace gpu {
shape miopen_abs::compute_shape(const std::vector<shape>& inputs) const shape miopen_abs::compute_shape(const std::vector<shape>& inputs) const
{ {
check_shapes{inputs, *this}.has(2).not_broadcasted(); check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1); return inputs.at(0);
} }
argument miopen_abs::compute(context& ctx, argument miopen_abs::compute(context& ctx,
......
...@@ -8,7 +8,7 @@ namespace gpu { ...@@ -8,7 +8,7 @@ namespace gpu {
shape miopen_tanh::compute_shape(const std::vector<shape>& inputs) const shape miopen_tanh::compute_shape(const std::vector<shape>& inputs) const
{ {
check_shapes{inputs, *this}.has(2).not_broadcasted(); check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1); return inputs.at(0);
} }
argument miopen_tanh::compute(context& ctx, argument miopen_tanh::compute(context& ctx,
......
#include <migraphx/gpu/adjust_allocation.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/op/add.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/op/tanh.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
struct lowering_target
{
std::string name() const { return "gpu::lowering"; }
std::vector<migraphx::pass> get_passes(migraphx::context& gctx) const
{
auto &ctx = migraphx::any_cast<migraphx::gpu::context>(gctx);
return {migraphx::gpu::lowering{ctx}, migraphx::dead_code_elimination{}};
}
migraphx::gpu::context get_context() const { return migraphx::gpu::context{}; }
};
TEST_CASE(trans_tanh)
{
    // Build the same small program twice: add -> tanh over a {2, 3} float tensor.
    auto make_program = [] {
        migraphx::program prog;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto x   = prog.add_parameter("x", s);
        auto sum = prog.add_instruction(migraphx::op::add{}, x, x);
        prog.add_instruction(migraphx::op::tanh{}, sum);
        return prog;
    };

    auto p1 = make_program();
    auto p2 = make_program();
    EXPECT(p1 == p2);

    // Replace the add instruction in p1 so it carries an incorrect
    // (transposed, {3, 2}) output shape, and re-pin tanh to the
    // original {2, 3} shape so only the add's shape is wrong.
    for(auto ins : iterator_for(p1))
    {
        if(ins->name() == "add")
        {
            migraphx::shape wrong_s{migraphx::shape::float_type, {3, 2}};
            migraphx::instruction::replace(ins, ins->get_operator(), wrong_s, ins->inputs());
        }
        if(ins->name() == "tanh")
        {
            migraphx::shape orig_s{migraphx::shape::float_type, {2, 3}};
            migraphx::instruction::replace(ins, ins->get_operator(), orig_s, ins->inputs());
        }
    }
    EXPECT(p1 != p2);

    // Lower both programs; they still differ because p1's add was tampered with.
    p1.compile(lowering_target{});
    p2.compile(lowering_target{});
    EXPECT(p1 != p2);

    // Restore the correct shape on the lowered gpu::add. The programs
    // remain unequal — presumably the allocation instruction created for
    // the wrong shape is still in place until adjust_allocation runs.
    for(auto ins : iterator_for(p1))
    {
        if(ins->name() == "gpu::add")
        {
            migraphx::shape correct_s{migraphx::shape::float_type, {2, 3}};
            migraphx::instruction::replace(ins, ins->get_operator(), correct_s, ins->inputs());
        }
    }
    EXPECT(p1 != p2);

    // adjust_allocation should rewrite the stale allocation to match the
    // instruction's shape, making the two programs equal again.
    migraphx::run_passes(
        p1, {migraphx::gpu::adjust_allocation{}, migraphx::dead_code_elimination{}});
    EXPECT(p1 == p2);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
...@@ -327,6 +327,19 @@ struct test_tanh : verify_program<test_tanh> ...@@ -327,6 +327,19 @@ struct test_tanh : verify_program<test_tanh>
} }
}; };
// Verify tanh applied to a transposed (non-packed-layout) input,
// with a following add that consumes the tanh result twice.
struct test_trans_tanh : verify_program<test_trans_tanh>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {4, 3, 3, 3}};
        auto x     = p.add_parameter("x", s);
        auto trans = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, x);
        auto y     = p.add_instruction(migraphx::op::tanh{}, trans);
        p.add_instruction(migraphx::op::add{}, y, y);
        return p;
    }
};
struct test_asin : verify_program<test_asin> struct test_asin : verify_program<test_asin>
{ {
migraphx::program create_program() const migraphx::program create_program() const
...@@ -674,6 +687,19 @@ struct test_abs : verify_program<test_abs> ...@@ -674,6 +687,19 @@ struct test_abs : verify_program<test_abs>
} }
}; };
// Verify abs applied to a transposed (non-packed-layout) input,
// with a following add that consumes the abs result twice.
struct test_trans_abs : verify_program<test_trans_abs>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
        auto tx = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, x);
        // Renamed from `tanhx` — a copy-paste leftover from test_trans_tanh;
        // this instruction computes abs, not tanh.
        auto absx = p.add_instruction(migraphx::op::abs{}, tx);
        p.add_instruction(migraphx::op::add{}, absx, absx);
        return p;
    }
};
struct test_leaky_relu : verify_program<test_leaky_relu> struct test_leaky_relu : verify_program<test_leaky_relu>
{ {
migraphx::program create_program() const migraphx::program create_program() const
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment