Commit e141649a authored by wsttiger's avatar wsttiger
Browse files

Merge branch 'master' into remove_concat

parents df4b1e15 1cdb49a6
......@@ -556,7 +556,7 @@ void relu_test()
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
p.add_instruction(migraph::op::activation{"relu"}, l);
p.add_instruction(migraph::op::relu{}, l);
p.compile(migraph::cpu::target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
......
......@@ -158,7 +158,7 @@ struct test_literals
auto weights = p.add_literal(
generate_literal(migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}}));
auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
p.add_instruction(migraph::op::activation{"relu"}, conv);
p.add_instruction(migraph::op::relu{}, conv);
return p;
}
};
......@@ -392,7 +392,7 @@ struct test_conv_relu
auto weights =
p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
p.add_instruction(migraph::op::activation{"relu"}, conv);
p.add_instruction(migraph::op::relu{}, conv);
return p;
}
};
......@@ -406,7 +406,7 @@ struct test_conv_relu_half
auto weights =
p.add_parameter("w", migraph::shape{migraph::shape::half_type, {4, 3, 3, 3}});
auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
p.add_instruction(migraph::op::activation{"relu"}, conv);
p.add_instruction(migraph::op::relu{}, conv);
return p;
}
};
......@@ -419,7 +419,7 @@ struct test_add_relu
auto x = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
auto y = p.add_parameter("y", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
auto add = p.add_instruction(migraph::op::add{}, x, y);
p.add_instruction(migraph::op::activation{"relu"}, add);
p.add_instruction(migraph::op::relu{}, add);
return p;
}
};
......@@ -446,7 +446,7 @@ struct test_conv_pooling
p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
auto pooling = p.add_instruction(migraph::op::pooling{"max"}, conv);
p.add_instruction(migraph::op::activation{"relu"}, pooling);
p.add_instruction(migraph::op::relu{}, pooling);
return p;
}
};
......@@ -657,7 +657,7 @@ struct test_conv_bn_relu_pooling
auto variance = p.add_literal(migraph::abs(migraph::generate_literal(vars, 4)));
auto bn = p.add_instruction(
migraph::op::batch_norm_inference{}, conv, scale, bias, mean, variance);
auto relu = p.add_instruction(migraph::op::activation{"relu"}, bn);
auto relu = p.add_instruction(migraph::op::relu{}, bn);
p.add_instruction(migraph::op::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
return p;
}
......@@ -794,7 +794,7 @@ struct test_conv_bn_relu_pooling2
auto conv2 = p.add_instruction(migraph::op::convolution{{0, 0}, {2, 2}, {1, 1}}, x2, w2);
auto bn2 = add_bn(p, conv2, 2048);
auto add = p.add_instruction(migraph::op::add{}, bn1, bn2);
auto relu = p.add_instruction(migraph::op::activation{"relu"}, add);
auto relu = p.add_instruction(migraph::op::relu{}, add);
p.add_instruction(migraph::op::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
return p;
}
......
......@@ -79,6 +79,7 @@ struct pass_op
return {};
return inputs.front();
}
int output_alias(const std::vector<migraph::shape>&) const { return 0; }
};
struct pass_standard_op
......@@ -103,6 +104,7 @@ struct pass_standard_op
return {};
return inputs.front();
}
int output_alias(const std::vector<migraph::shape>&) const { return 0; }
};
struct nop
......
......@@ -32,7 +32,7 @@ void pytorch_conv_relu_maxpool()
auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraph::op::activation{"relu"}, l5);
auto l6 = p.add_instruction(migraph::op::relu{}, l5);
p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
auto prog = migraph::parse_onnx("conv_relu_maxpool.onnx");
......@@ -55,7 +55,7 @@ void pytorch_conv_bn_relu_maxpool()
auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraph::op::batch_norm_inference{1.0e-5f}, l5, p3, p4, p5, p6);
auto l7 = p.add_instruction(migraph::op::activation{"relu"}, l6);
auto l7 = p.add_instruction(migraph::op::relu{}, l6);
p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l7);
auto prog = migraph::parse_onnx("conv_bn_relu_maxpool.onnx");
......@@ -72,7 +72,7 @@ void pytorch_conv_relu_maxpool_x2()
auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraph::op::activation{"relu"}, l5);
auto l6 = p.add_instruction(migraph::op::relu{}, l5);
auto l7 = p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
auto l8 = p.add_parameter("3", {migraph::shape::float_type, {1, 5, 5, 5}});
......@@ -80,7 +80,7 @@ void pytorch_conv_relu_maxpool_x2()
auto l10 = p.add_instruction(migraph::op::convolution{}, l7, l8);
auto l11 = p.add_instruction(migraph::op::broadcast{axis, l10->get_shape()}, l9);
auto l12 = p.add_instruction(migraph::op::add{}, l10, l11);
auto l13 = p.add_instruction(migraph::op::activation{"relu"}, l12);
auto l13 = p.add_instruction(migraph::op::relu{}, l12);
p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l13);
auto prog = migraph::parse_onnx("conv_relu_maxpoolX2.onnx");
......
#include <migraph/program.hpp>
#include <migraph/instruction.hpp>
#include <test.hpp>
#include <basic_ops.hpp>
void simple_alias()
{
migraph::program p;
auto l = p.add_literal(1);
auto p1 = p.add_instruction(pass_op{}, l);
EXPECT(bool{migraph::instruction::get_output_alias(l) == l});
EXPECT(bool{migraph::instruction::get_output_alias(p1) == l});
}
void cascade_alias()
{
migraph::program p;
auto l = p.add_literal(1);
auto p1 = p.add_instruction(pass_op{}, l);
auto p2 = p.add_instruction(pass_op{}, p1);
auto p3 = p.add_instruction(pass_op{}, p2);
EXPECT(bool{migraph::instruction::get_output_alias(l) == l});
EXPECT(bool{migraph::instruction::get_output_alias(p1) == l});
EXPECT(bool{migraph::instruction::get_output_alias(p2) == l});
EXPECT(bool{migraph::instruction::get_output_alias(p3) == l});
}
void no_alias()
{
migraph::program p;
auto x = p.add_literal(1);
auto y = p.add_literal(2);
auto sum = p.add_instruction(sum_op{}, x, y);
EXPECT(bool{migraph::instruction::get_output_alias(sum) == sum});
}
int main()
{
    // Run every output-alias test case; any failed EXPECT aborts the run.
    simple_alias();
    cascade_alias();
    no_alias();
    return 0;
}
......@@ -43,6 +43,9 @@ struct operation
* the same the `output` shape.
*/
argument compute(context& ctx, const shape& output, const std::vector<argument>& input) const;
/// An optional method to return which argument the output will alias. If
/// there is no aliased output then -1 can be returned.
int output_alias(const std::vector<shape>& input) const;
/// An optional stream operator to print the operation. When this is not
/// implemented, it will just print the operation's name.
friend std::ostream& operator<<(std::ostream& os, const operation& op);
......@@ -108,10 +111,34 @@ compute_op(const T& x, context& ctx, const shape& output_shape, const std::vecto
return compute_op(rank<1>{}, x, ctx, output_shape, input);
}
// Fallback overload (lowest-priority rank tag): chosen by the dispatcher
// below when T has no usable output_alias member. -1 means the operation
// produces no aliased output (matches the operation interface contract).
template <class T>
int output_alias_op(rank<0>, const T&, const std::vector<shape>&)
{
    return -1;
}
// Preferred overload: participates in overload resolution only when
// x.output_alias(shapes) is a well-formed expression (SFINAE via the
// trailing decltype), i.e. when T opts in by defining output_alias.
template <class T>
auto output_alias_op(rank<1>, const T& x, const std::vector<shape>& shapes)
    -> decltype(x.output_alias(shapes))
{
    return x.output_alias(shapes);
}
// Dispatcher: tag-dispatches on rank so the rank<1> overload (T's own
// output_alias) wins when available, otherwise falls back to the rank<0>
// default of -1. Assumes rank<1> converts to rank<0> so both overloads
// are viable — confirm against the project's rank helper definition.
template <class T>
int output_alias_op(const T& x, const std::vector<shape>& shapes)
{
    return output_alias_op(rank<1>{}, x, shapes);
}
<%
interface(
'operation',
virtual('name', returns = 'std::string', const = True),
virtual('output_alias',
returns = 'int',
input = 'const std::vector<shape>&',
const = True,
default = 'output_alias_op'),
virtual('compute_shape', returns = 'shape', input = 'const std::vector<shape>&', const = True),
virtual('compute',
returns = 'argument',
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment