Commit b8090620 authored by Shucai Xiao's avatar Shucai Xiao

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into rnn_optimization

parents c2db3b96 3540f1b9
@@ -346,60 +346,39 @@ TEST_CASE(gather)
     }
 }
 
-TEST_CASE(logsoftmax)
+template <class T>
+void test_softmax_variations()
 {
     {
         migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
-        int axis = 0;
-        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
-                     migraphx::op::logsoftmax{axis},
-                     input);
+        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{0}, input);
     }
 
     {
         migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
-        int axis = 1;
-        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
-                     migraphx::op::logsoftmax{axis},
-                     input);
+        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{1}, input);
     }
 
     {
         migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
-        int axis = 2;
-        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
-                     migraphx::op::logsoftmax{axis},
-                     input);
+        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{2}, input);
     }
 
     {
         migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
-        int axis = 3;
-        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
-                     migraphx::op::logsoftmax{axis},
-                     input);
+        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}}, T{3}, input);
     }
 
     {
         migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
         int axis = 4;
-        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
-                     migraphx::op::logsoftmax{axis},
-                     input);
-    }
-
-    {
-        migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
-        int axis = 5;
-        throws_shape(migraphx::op::logsoftmax{axis}, input);
-    }
-
-    {
-        migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
-        int axis = -1;
-        throws_shape(migraphx::op::logsoftmax{axis}, input);
+        throws_shape(T{axis}, input);
     }
 }
 
+TEST_CASE(softmax) { test_softmax_variations<migraphx::op::softmax>(); }
+
+TEST_CASE(logsoftmax) { test_softmax_variations<migraphx::op::logsoftmax>(); }
+
 // 2 input arguments
 TEST_CASE(matmul)
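
The change above collapses five near-identical logsoftmax blocks into one helper templated on the operator type, so softmax and logsoftmax share the same shape checks; axis 4 must now throw, since a rank-4 input only has axes 0 through 3. A minimal standalone sketch of that boundary check, assuming only the public MIGraphX headers (calling compute_shape directly rather than going through the expect_shape/throws_shape test helpers):

#include <migraphx/operators.hpp>
#include <iostream>

int main()
{
    migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
    migraphx::op::logsoftmax op{4}; // rank-4 input: valid axes are 0..3
    try
    {
        op.compute_shape({input}); // shape computation should reject axis 4
    }
    catch(const std::exception& e)
    {
        std::cout << "rejected as expected: " << e.what() << "\n";
    }
    return 0;
}
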
@@ -29,12 +29,12 @@ TEST_CASE(basic_graph_test)
     EXPECT(migraphx::contains(test, "\"@0\"[label=\"@literal\"]"));
     EXPECT(migraphx::contains(test, "\"y\"[label=\"@param:y\"]"));
     EXPECT(migraphx::contains(test, "\"x\"[label=\"@param:x\"]"));
-    EXPECT(migraphx::contains(test, "\"@3\"[label=\"sum\"]"));
-    EXPECT(migraphx::contains(test, "\"@4\"[label=\"sum\"]"));
-    EXPECT(migraphx::contains(test, "\"x\" -> \"@3\""));
-    EXPECT(migraphx::contains(test, "\"y\" -> \"@3\""));
-    EXPECT(migraphx::contains(test, "\"@3\" -> \"@4\""));
-    EXPECT(migraphx::contains(test, "\"@0\" -> \"@4\""));
+    EXPECT(migraphx::contains(test, "\"@1\"[label=\"sum\"]"));
+    EXPECT(migraphx::contains(test, "\"@2\"[label=\"sum\"]"));
+    EXPECT(migraphx::contains(test, "\"x\" -> \"@1\""));
+    EXPECT(migraphx::contains(test, "\"y\" -> \"@1\""));
+    EXPECT(migraphx::contains(test, "\"@1\" -> \"@2\""));
+    EXPECT(migraphx::contains(test, "\"@0\" -> \"@2\""));
     EXPECT(migraphx::contains(test, "[label=\"int64_type, {1}, {0}\"]"));
 }
[new binary file, likely relu6_test.pb given the node names: a serialized TensorFlow graph with a Placeholder input (dtype/shape attributes) feeding a node "relu6" of op Relu6; raw protobuf bytes omitted, no trailing newline]
@@ -109,6 +109,7 @@ TEST_CASE(conv_test)
     migraphx::op::convolution op;
     op.padding_mode = migraphx::op::padding_mode_t::same;
     op.padding      = {1, 1};
+    op.stride       = {1, 1};
     op.dilation     = {1, 1};
     auto l2 = p.add_instruction(migraphx::op::transpose{{0, 3, 1, 2}}, l1);
@@ -119,6 +120,32 @@ TEST_CASE(conv_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(depthwiseconv_test)
+{
+    migraphx::program p;
+    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
+    std::vector<float> weight_data(3 * 3 * 3 * 1);
+    std::fill(weight_data.begin(), weight_data.end(), 1.0f);
+    auto l1 =
+        p.add_literal(migraphx::shape{migraphx::shape::float_type, {3, 3, 3, 1}}, weight_data);
+    migraphx::op::convolution op;
+    op.padding_mode = migraphx::op::padding_mode_t::same;
+    op.padding      = {1, 1};
+    op.stride       = {1, 1};
+    op.dilation     = {1, 1};
+    op.group        = 3;
+    auto l2 = p.add_instruction(migraphx::op::transpose{{0, 3, 1, 2}}, l1);
+    auto l3 = p.add_instruction(migraphx::op::transpose{{1, 3, 0, 2}}, l2);
+    auto l4 = p.add_instruction(migraphx::op::contiguous{}, l3);
+    auto l5 = p.add_instruction(migraphx::op::reshape{{3, 1, 3, 3}}, l4);
+    p.add_instruction(op, l0, l5);
+    auto prog = migraphx::parse_tf("depthwise_conv_test.pb", true);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(identity_test)
 {
     migraphx::program p;
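
In depthwiseconv_test above, the transpose/transpose/contiguous/reshape chain converts TensorFlow's depthwise weight layout {H, W, C, M} = {3, 3, 3, 1} into the {C*M, 1, H, W} = {3, 1, 3, 3} layout that a grouped convolution with group equal to the channel count expects. A shape-level sketch of that chain, assuming only the public headers:

#include <migraphx/operators.hpp>
#include <iostream>

int main()
{
    migraphx::shape w{migraphx::shape::float_type, {3, 3, 3, 1}};        // TF HWCM weights
    auto t1 = migraphx::op::transpose{{0, 3, 1, 2}}.compute_shape({w});  // lens {3, 1, 3, 3}, permuted strides
    auto t2 = migraphx::op::transpose{{1, 3, 0, 2}}.compute_shape({t1}); // lens {1, 3, 3, 3}
    auto c  = migraphx::op::contiguous{}.compute_shape({t2});            // same lens, standard strides
    auto r  = migraphx::op::reshape{{3, 1, 3, 3}}.compute_shape({c});    // grouped-conv weight layout
    std::cout << t1 << "\n" << t2 << "\n" << c << "\n" << r << "\n";
    return 0;
}
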
@@ -144,6 +171,40 @@ TEST_CASE(matmul_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(mean_test)
+{
+    migraphx::program p;
+    migraphx::literal l{migraphx::shape{migraphx::shape::int32_type, {2}}, {2, 3}};
+    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
+    p.add_literal(l);
+    p.add_literal(l);
+    migraphx::op::pooling op;
+    op.lengths = {16, 16};
+    p.add_instruction(op, l0);
+    auto l3 = p.add_instruction(op, l0);
+    p.add_instruction(migraphx::op::squeeze{{2, 3}}, l3);
+    auto prog = migraphx::parse_tf("mean_test.pb", false);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(mean_test_nhwc)
+{
+    migraphx::program p;
+    migraphx::literal l{migraphx::shape{migraphx::shape::int32_type, {2}}, {1, 2}};
+    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
+    p.add_literal(l);
+    p.add_literal(l);
+    migraphx::op::pooling op;
+    op.lengths = {16, 16};
+    p.add_instruction(op, l0);
+    auto l3 = p.add_instruction(op, l0);
+    p.add_instruction(migraphx::op::squeeze{{2, 3}}, l3);
+    auto prog = migraphx::parse_tf("mean_test_nhwc.pb", true);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(mul_test)
 {
     migraphx::program p;
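
The two mean tests above pin down the TF Mean lowering: reducing over both spatial axes of a {1, 3, 16, 16} tensor becomes average pooling with a {16, 16} window, and squeeze{{2, 3}} then drops the 1x1 spatial dimensions for the keep_dims=false variant. A small shape-level sketch under the same assumptions (the "average" mode string is an assumption about how this version of op::pooling is constructed):

#include <migraphx/operators.hpp>
#include <iostream>

int main()
{
    migraphx::op::pooling op{"average"};
    op.lengths = {16, 16};
    migraphx::shape x{migraphx::shape::float_type, {1, 3, 16, 16}};
    auto pooled   = op.compute_shape({x});                                 // {1, 3, 1, 1}
    auto squeezed = migraphx::op::squeeze{{2, 3}}.compute_shape({pooled}); // {1, 3}
    std::cout << pooled << "\n" << squeezed << "\n";
    return 0;
}
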
@@ -212,8 +273,8 @@ TEST_CASE(pooling_test)
     max_pool_op.stride  = {2, 2};
     avg_pool_op.lengths = {2, 2};
     max_pool_op.lengths = {2, 2};
-    p.add_instruction(max_pool_op, l0);
     p.add_instruction(avg_pool_op, l0);
+    p.add_instruction(max_pool_op, l0);
     auto prog = migraphx::parse_tf("pooling_test.pb", true);
     EXPECT(p == prog);
@@ -229,6 +290,16 @@ TEST_CASE(relu_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(relu6_test)
+{
+    migraphx::program p;
+    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
+    p.add_instruction(migraphx::op::clip{6.0, 0.0}, l0);
+    auto prog = migraphx::parse_tf("relu6_test.pb", false);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(reshape_test)
 {
     migraphx::program p;
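
relu6_test above relies on Relu6(x) = min(max(x, 0), 6), which maps onto a single clip whose {6.0, 0.0} initializer fills the op's max and min fields in that order. A plain C++ scalar reference of the same function, independent of MIGraphX:

#include <algorithm>
#include <iostream>

// Scalar reference for the lowering above: Relu6(x) == clip(x, 0, 6).
float relu6_ref(float x) { return std::clamp(x, 0.0f, 6.0f); }

int main()
{
    for(float x : {-1.0f, 3.5f, 7.0f})
        std::cout << x << " -> " << relu6_ref(x) << "\n"; // prints 0, 3.5, 6
    return 0;
}
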
#include <iostream>
#include <numeric>
#include <vector>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/propagate_constant.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/half.hpp>
#include "test.hpp"

TEST_CASE(param_add)
{
    auto create_program_float = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1 = p.add_parameter("x", s);
        auto p2 = p.add_parameter("y", s);
        p.add_instruction(migraphx::op::add{}, p1, p2);
        return p;
    };

    auto create_program_half = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1  = p.add_parameter("x", s);
        auto hp1 = p.insert_instruction(
            std::next(p1), migraphx::op::convert{migraphx::shape::half_type}, p1);
        auto p2  = p.add_parameter("y", s);
        auto hp2 = p.insert_instruction(
            std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
        auto hs = p.add_instruction(migraphx::op::add{}, hp1, hp2);
        p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hs);
        return p;
    };

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half();
        migraphx::quantize(p1);
        EXPECT(p1 == p2);
    }

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half();
        migraphx::quantize(p1, {"add"});
        EXPECT(p1 == p2);
    }
}
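
param_add above encodes the contract of migraphx::quantize: each parameter gains a convert to half immediately after its definition, the add computes in half, and one final convert restores float at the program boundary. A standalone sketch to inspect that rewrite (assumes program printing via operator<<, as used elsewhere in this test suite):

#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/quantization.hpp>
#include <iostream>

int main()
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    auto x = p.add_parameter("x", s);
    auto y = p.add_parameter("y", s);
    p.add_instruction(migraphx::op::add{}, x, y);
    migraphx::quantize(p);
    std::cout << p << std::endl; // expect converts around the params and after the add
    return 0;
}
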
TEST_CASE(param_add_sub)
{
    auto create_program_float = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1   = p.add_parameter("x", s);
        auto p2   = p.add_parameter("y", s);
        auto sum  = p.add_instruction(migraphx::op::add{}, p1, p2);
        auto diff = p.add_instruction(migraphx::op::sub{}, sum, p2);
        p.add_instruction(migraphx::op::add{}, diff, p1);
        return p;
    };

    auto create_program_half_add = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1  = p.add_parameter("x", s);
        auto hp1 = p.insert_instruction(
            std::next(p1), migraphx::op::convert{migraphx::shape::half_type}, p1);
        auto p2  = p.add_parameter("y", s);
        auto hp2 = p.insert_instruction(
            std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
        auto hsum  = p.add_instruction(migraphx::op::add{}, hp1, hp2);
        auto sum   = p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hsum);
        auto diff  = p.add_instruction(migraphx::op::sub{}, sum, p2);
        auto hdiff = p.add_instruction(migraphx::op::convert{migraphx::shape::half_type}, diff);
        auto res   = p.add_instruction(migraphx::op::add{}, hdiff, hp1);
        p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, res);
        return p;
    };

    auto create_program_half_sub = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1  = p.add_parameter("x", s);
        auto p2  = p.add_parameter("y", s);
        auto hp2 = p.insert_instruction(
            std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
        auto sum   = p.add_instruction(migraphx::op::add{}, p1, p2);
        auto hsum  = p.add_instruction(migraphx::op::convert{migraphx::shape::half_type}, sum);
        auto hdiff = p.add_instruction(migraphx::op::sub{}, hsum, hp2);
        auto diff  = p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hdiff);
        p.add_instruction(migraphx::op::add{}, diff, p1);
        return p;
    };

    auto create_program_half_all = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1  = p.add_parameter("x", s);
        auto hp1 = p.insert_instruction(
            std::next(p1), migraphx::op::convert{migraphx::shape::half_type}, p1);
        auto p2  = p.add_parameter("y", s);
        auto hp2 = p.insert_instruction(
            std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
        auto hsum  = p.add_instruction(migraphx::op::add{}, hp1, hp2);
        auto hdiff = p.add_instruction(migraphx::op::sub{}, hsum, hp2);
        auto hres  = p.add_instruction(migraphx::op::add{}, hdiff, hp1);
        p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hres);
        return p;
    };

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half_add();
        migraphx::quantize(p1, {"add"});
        EXPECT(p1 == p2);
    }

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half_sub();
        migraphx::quantize(p1, {"sub"});
        EXPECT(p1 == p2);
    }

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half_all();
        migraphx::quantize(p1);
        migraphx::run_passes(p1, {migraphx::dead_code_elimination{}});
        EXPECT(p1 == p2);
    }
}
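
The three half variants above capture an asymmetry worth noting: quantizing only "add" forces a convert back to float before the unquantized sub and back to half after it, quantizing only "sub" is the mirror image, and quantizing everything keeps the whole chain in half with a single boundary convert; the dead_code_elimination run in the last block strips the converts whose results become unused after the rewrite.
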
TEST_CASE(literal_add)
{
    auto create_program_float = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        std::vector<float> data(2 * 3);
        std::iota(data.begin(), data.end(), 1.0f);
        auto l1 = p.add_literal(migraphx::literal(s, data));
        auto l2 = p.add_literal(migraphx::literal(s, data));
        p.add_instruction(migraphx::op::add{}, l1, l2);
        return p;
    };

    auto create_program_half = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::half_type, {2, 3}};
        std::vector<migraphx::half> data(2 * 3);
        std::iota(data.begin(), data.end(), 1.0f);
        auto l1 = p.add_literal(migraphx::literal(s, data));
        auto l2 = p.add_literal(migraphx::literal(s, data));
        auto hs = p.add_instruction(migraphx::op::add{}, l1, l2);
        p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hs);
        return p;
    };

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half();
        migraphx::quantize(p1, {"all"});
        migraphx::run_passes(p1,
                             {migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
        migraphx::run_passes(p2,
                             {migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
        EXPECT(p1 == p2);
    }

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half();
        migraphx::quantize(p1, {"add"});
        migraphx::run_passes(p1,
                             {migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
        migraphx::run_passes(p2,
                             {migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
        EXPECT(p1 == p2);
    }
}
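
literal_add only compares equal after the extra passes because quantize leaves the float literals in place behind convert ops: propagate_constant folds each convert(literal) into a half literal, and dead_code_elimination removes the orphaned float literals, at which point the quantized program matches the hand-built half version.
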
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -69,7 +69,7 @@ auto operator<<(std::ostream& os, const T& x) -> decltype(os << x.name())
 {
     os << x.name();
     char delim = '[';
-    reflect_each(x, [&](auto& y, auto name) {
+    reflect_each(x, [&](auto&& y, auto name) {
         os << delim;
         os << name << "=";
         stream_write_value(os, y);
@@ -87,6 +87,8 @@ namespace operation_equal {
 template <class T, class U>
 auto operator==(const T& x, const U& y) -> decltype(x.name() == y.name())
 {
+    static_assert(is_reflectable<T>{} or sizeof(T) <= 1,
+                  "Missing equality operator or reflect method.");
     if(x.name() != y.name())
         return false;
     const auto& yy = any_cast<T>(y);
@@ -175,7 +177,7 @@ auto is_context_free_op(const T& x) -> decltype(is_context_free_op(
 }
 
 template <class T>
-int output_alias_op(rank<0>, const T&, const std::vector<shape>&)
+std::ptrdiff_t output_alias_op(rank<0>, const T&, const std::vector<shape>&)
 {
     return -1;
 }
@@ -188,7 +190,7 @@ auto output_alias_op(rank<1>, const T& x, const std::vector<shape>& shapes)
 }
 
 template <class T>
-int output_alias_op(const T& x, const std::vector<shape>& shapes)
+std::ptrdiff_t output_alias_op(const T& x, const std::vector<shape>& shapes)
 {
     return output_alias_op(rank<1>{}, x, shapes);
 }
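
output_alias reports which input argument an operation's result aliases, with -1 meaning the output is freshly allocated; widening the return type from int to std::ptrdiff_t adopts the conventional signed index type. A hypothetical op sketch (the name pass_through and its behavior are illustrative, not part of the codebase) showing the kind of member the rank<1> overload above detects:

#include <migraphx/shape.hpp>
#include <cstddef>
#include <string>
#include <vector>

// Hypothetical: an op whose output is just its first input, so output_alias
// returns 0 and the runtime can skip allocating a separate output buffer.
struct pass_through
{
    std::string name() const { return "pass_through"; }
    migraphx::shape compute_shape(std::vector<migraphx::shape> inputs) const
    {
        return inputs.front();
    }
    std::ptrdiff_t output_alias(const std::vector<migraphx::shape>&) const { return 0; }
};
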
@@ -238,7 +240,7 @@ auto has_finalize_op(const T&) -> decltype(has_finalize_op(rank<1>{},
     virtual('is_context_free', returns = 'bool', const = True, default = 'is_context_free_op'),
     virtual('has_finalize', returns = 'bool', const = True, default = 'has_finalize_op'),
     virtual('output_alias',
-            returns = 'int',
+            returns = 'std::ptrdiff_t',
             input   = 'const std::vector<shape>&',
             const   = True,
             default = 'output_alias_op'),