Commit 01557ea0 authored by Shucai Xiao

merge changes from the develop branches.

parents ec1ab58b 767ca0cc
@@ -20,6 +20,13 @@ struct eliminate_allocation_target
struct allocate
{
migraphx::shape s{};
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::pack(f(self.s, "shape"));
}
std::string name() const { return "allocate"; }
migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
{
......
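The same five-line reflect member is added to each stateful test operator in this commit (here, and again in the eliminate_concat and memory_coloring test hunks below): migraphx::pack(f(self.s, "shape")) hands every data member plus a display name to a visitor, so generic utilities can walk an operator's fields. The sketch below imitates the idiom in plain C++; it deliberately simplifies (the visitor is invoked directly instead of returning a pack) and is not MIGraphX's actual reflect machinery.

#include <iostream>
#include <string>

// Simplified stand-in for the reflect idiom: reflect applies the visitor f to
// each (member, name) pair of the operator. All names here are illustrative.
struct my_allocate
{
    std::string shape_desc = "float_type, {3, 2}";

    template <class Self, class F>
    static void reflect(Self& self, F f)
    {
        f(self.shape_desc, "shape"); // one visitor call per reflected member
    }
};

// A generic printer built purely on reflect(), similar in spirit to the
// operator<< hunk further down in this diff.
template <class T>
void print_members(const T& x)
{
    T::reflect(x, [](auto&& member, auto name) {
        std::cout << name << "=" << member << '\n';
    });
}

int main() { print_members(my_allocate{}); } // prints: shape=float_type, {3, 2}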
@@ -10,6 +10,13 @@ struct concat
{
concat(std::size_t axis) { op.axis = axis; }
migraphx::op::concat op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "eliminate_concat::concat"; }
migraphx::shape compute_shape(std::vector<migraphx::shape> inputs) const
{
@@ -51,6 +58,13 @@ struct eliminate_concat_target
struct allocate
{
migraphx::shape s{};
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::pack(f(self.s, "shape"));
}
std::string name() const { return "allocate"; }
migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
{
......
@@ -58,7 +58,7 @@ TEST_CASE(tanh_shape)
if(ins->name() == "hip::allocate")
{
migraphx::shape new_s{migraphx::shape::float_type, {3, 2}, {1, 3}};
migraphx::instruction::replace(ins, ins->get_operator(), new_s, ins->inputs());
ins->replace(migraphx::gpu::hip_allocate{new_s});
}
}
EXPECT(p1 != p2);
......
@@ -1327,6 +1327,17 @@ struct test_batchnorm_inference : verify_program<test_batchnorm_inference>
}
};
struct test_clip : verify_program<test_clip>
{
migraphx::program create_program() const
{
migraphx::program p;
auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3}});
p.add_instruction(migraphx::op::clip{6.0, 0.0}, x);
return p;
}
};
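test_clip drives the elementwise clip operator through the verification harness on a 3-element float tensor. Reading migraphx::op::clip{6.0, 0.0} as a ReLU6-style clamp to the range [0, 6] (the max-then-min aggregate member order is an assumption, not stated in this diff), the numeric behaviour being verified is a per-element clamp, sketched here with plain C++ rather than MIGraphX:

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    // Illustrative inputs only; assumes clip{6.0, 0.0} means max = 6, min = 0.
    std::vector<float> x = {-1.0f, 3.0f, 9.0f};
    for(auto& v : x)
        v = std::clamp(v, 0.0f, 6.0f); // clamp each element to [0, 6]
    for(auto v : x)
        std::cout << v << ' ';         // prints: 0 3 6
    std::cout << '\n';
}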
struct test_conv_bn : verify_program<test_conv_bn>
{
migraphx::program create_program() const
......
@@ -18,6 +18,13 @@ struct memory_coloring_target
struct allocate
{
migraphx::shape s{};
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::pack(f(self.s, "shape"));
}
std::string name() const { return "allocate"; }
migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
{
......
@@ -794,4 +794,14 @@ TEST_CASE(no_pad_test)
EXPECT(p == prog);
}
TEST_CASE(clip_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}});
p.add_instruction(migraphx::op::clip{6.0, 0.0}, l0);
auto prog = migraphx::parse_onnx("clip_test.onnx");
EXPECT(p == prog);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -29,12 +29,12 @@ TEST_CASE(basic_graph_test)
EXPECT(migraphx::contains(test, "\"@0\"[label=\"@literal\"]"));
EXPECT(migraphx::contains(test, "\"y\"[label=\"@param:y\"]"));
EXPECT(migraphx::contains(test, "\"x\"[label=\"@param:x\"]"));
EXPECT(migraphx::contains(test, "\"@3\"[label=\"sum\"]"));
EXPECT(migraphx::contains(test, "\"@4\"[label=\"sum\"]"));
EXPECT(migraphx::contains(test, "\"x\" -> \"@3\""));
EXPECT(migraphx::contains(test, "\"y\" -> \"@3\""));
EXPECT(migraphx::contains(test, "\"@3\" -> \"@4\""));
EXPECT(migraphx::contains(test, "\"@0\" -> \"@4\""));
EXPECT(migraphx::contains(test, "\"@1\"[label=\"sum\"]"));
EXPECT(migraphx::contains(test, "\"@2\"[label=\"sum\"]"));
EXPECT(migraphx::contains(test, "\"x\" -> \"@1\""));
EXPECT(migraphx::contains(test, "\"y\" -> \"@1\""));
EXPECT(migraphx::contains(test, "\"@1\" -> \"@2\""));
EXPECT(migraphx::contains(test, "\"@0\" -> \"@2\""));
EXPECT(migraphx::contains(test, "[label=\"int64_type, {1}, {0}\"]"));
}
......
@@ -69,7 +69,7 @@ auto operator<<(std::ostream& os, const T& x) -> decltype(os << x.name())
{
os << x.name();
char delim = '[';
reflect_each(x, [&](auto& y, auto name) {
reflect_each(x, [&](auto&& y, auto name) {
os << delim;
os << name << "=";
stream_write_value(os, y);
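The visitor parameter changes from auto& to auto&&: a plain auto& in a generic lambda binds only to lvalues, so it would fail to compile if reflect_each ever hands the visitor a temporary (for example a value computed inside reflect rather than a direct member reference), while a forwarding reference accepts both. A minimal, MIGraphX-independent illustration of the difference:

int consume(int) { return 0; }

int main()
{
    auto by_ref = [](auto& x) { return consume(x); };  // binds lvalues only
    auto by_fwd = [](auto&& x) { return consume(x); }; // binds lvalues and rvalues

    int v = 1;
    by_ref(v);    // ok: v is an lvalue
    // by_ref(2); // would not compile: auto& cannot bind to a temporary
    by_fwd(v);    // ok
    by_fwd(2);    // ok: forwarding reference also accepts the rvalue
    return 0;
}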
@@ -87,6 +87,8 @@ namespace operation_equal {
template <class T, class U>
auto operator==(const T& x, const U& y) -> decltype(x.name() == y.name())
{
static_assert(is_reflectable<T>{} or sizeof(T) <= 1,
"Missing equality operator or reflect method.");
if(x.name() != y.name())
return false;
const auto& yy = any_cast<T>(y);
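This static_assert is what forces the reflect additions elsewhere in the commit: the generic equality used for operations now rejects any operator that carries state (sizeof(T) > 1) but exposes neither its own equality operator nor a reflect method, because there would be nothing to compare member-wise. A simplified sketch of how reflection can drive such a comparison (std::tie stands in for migraphx::pack; this is not the actual operation_equal implementation):

#include <tuple>

// Sketch only: model reflect as returning a tuple of references to the
// operator's members, roughly the role migraphx::pack plays.
struct my_concat
{
    int axis = 0;

    template <class Self>
    static auto reflect(Self& self)
    {
        return std::tie(self.axis); // one entry per reflected member
    }
};

// Generic equality built on reflection: two operators of the same type compare
// equal exactly when their reflected member tuples compare equal.
template <class T>
bool reflect_equal(const T& x, const T& y)
{
    return T::reflect(x) == T::reflect(y);
}

int main()
{
    my_concat a;
    my_concat b;
    b.axis = 1;
    return (reflect_equal(a, a) && !reflect_equal(a, b)) ? 0 : 1;
}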
@@ -175,7 +177,7 @@ auto is_context_free_op(const T& x) -> decltype(is_context_free_op(
}
template <class T>
int output_alias_op(rank<0>, const T&, const std::vector<shape>&)
std::ptrdiff_t output_alias_op(rank<0>, const T&, const std::vector<shape>&)
{
return -1;
}
@@ -188,7 +190,7 @@ auto output_alias_op(rank<1>, const T& x, const std::vector<shape>& shapes)
}
template <class T>
int output_alias_op(const T& x, const std::vector<shape>& shapes)
std::ptrdiff_t output_alias_op(const T& x, const std::vector<shape>& shapes)
{
return output_alias_op(rank<1>{}, x, shapes);
}
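output_alias switches from int to std::ptrdiff_t here and, below, in the generated virtual interface, so the return type stays a signed index type throughout: -1 (the default above) means the output aliases no input, while a non-negative value is read as the index of the input the output aliases. A hedged sketch of that convention with made-up operator names (not taken from this diff):

#include <cstddef>
#include <vector>

struct shape_stub {}; // stand-in for migraphx::shape, for illustration only

// An operator whose output reuses the buffer of its last input reports that
// input's index; an operator with no aliasing reports -1, matching the default.
struct aliasing_op
{
    std::ptrdiff_t output_alias(const std::vector<shape_stub>& inputs) const
    {
        return static_cast<std::ptrdiff_t>(inputs.size()) - 1; // alias the last input
    }
};

struct pure_op
{
    std::ptrdiff_t output_alias(const std::vector<shape_stub>&) const
    {
        return -1; // no aliasing
    }
};

int main()
{
    std::vector<shape_stub> ins(3);
    bool ok = aliasing_op{}.output_alias(ins) == 2 && pure_op{}.output_alias(ins) == -1;
    return ok ? 0 : 1;
}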
@@ -238,7 +240,7 @@ auto has_finalize_op(const T&) -> decltype(has_finalize_op(rank<1>{},
virtual('is_context_free', returns = 'bool', const = True, default = 'is_context_free_op'),
virtual('has_finalize', returns = 'bool', const = True, default = 'has_finalize_op'),
virtual('output_alias',
returns = 'int',
returns = 'std::ptrdiff_t',
input = 'const std::vector<shape>&',
const = True,
default = 'output_alias_op'),
......