Commit d71254c5 authored by Paul

Merge branch 'master' into mem-color-tests

parents 0cbb0368 7d972d2b
@@ -140,7 +140,7 @@ bool throws(F f)
     }
 }
-template <class F, class Exception>
+template <class Exception, class F>
 bool throws(F f, const std::string& msg = "")
 {
     try
...
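The parameter swap above matters at call sites that name the exception type explicitly: template arguments bind left to right, so `Exception` must come first for the callable type `F` to remain deducible. A minimal standalone sketch of the pattern (the message check is elided; this is not the repo's exact body):

#include <stdexcept>
#include <string>

template <class Exception, class F>
bool throws(F f, const std::string& msg = "")
{
    try
    {
        f(); // matching msg against e.what() is elided in this sketch
    }
    catch(const Exception&)
    {
        return true;
    }
    catch(...)
    {
    }
    return false;
}

// Only the exception type is spelled out; F is deduced from the lambda:
//   throws<std::runtime_error>([] { throw std::runtime_error{"boom"}; });
// With the old <class F, class Exception> order, the explicit argument
// would have bound to F instead and failed to compile.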
[new binary file "globalavgpool-example": ONNX protobuf containing a GlobalAveragePool node, graph "test-globalavgpool"]
[new binary file "globalmaxpool-example": ONNX protobuf containing a GlobalMaxPool node, graph "test-globalmaxpool"]
@@ -32,7 +32,7 @@ void pytorch_conv_relu_maxpool()
     auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
     auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
     auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
-    auto l6 = p.add_instruction(migraph::op::activation{"relu"}, l5);
+    auto l6 = p.add_instruction(migraph::op::relu{}, l5);
     p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
     auto prog = migraph::parse_onnx("conv_relu_maxpool.onnx");
@@ -55,7 +55,7 @@ void pytorch_conv_bn_relu_maxpool()
     auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
     auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
     auto l6 = p.add_instruction(migraph::op::batch_norm_inference{1.0e-5f}, l5, p3, p4, p5, p6);
-    auto l7 = p.add_instruction(migraph::op::activation{"relu"}, l6);
+    auto l7 = p.add_instruction(migraph::op::relu{}, l6);
     p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l7);
     auto prog = migraph::parse_onnx("conv_bn_relu_maxpool.onnx");
@@ -72,7 +72,7 @@ void pytorch_conv_relu_maxpool_x2()
     auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
     auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
     auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
-    auto l6 = p.add_instruction(migraph::op::activation{"relu"}, l5);
+    auto l6 = p.add_instruction(migraph::op::relu{}, l5);
     auto l7 = p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
     auto l8 = p.add_parameter("3", {migraph::shape::float_type, {1, 5, 5, 5}});
@@ -80,7 +80,7 @@ void pytorch_conv_relu_maxpool_x2()
     auto l10 = p.add_instruction(migraph::op::convolution{}, l7, l8);
     auto l11 = p.add_instruction(migraph::op::broadcast{axis, l10->get_shape()}, l9);
     auto l12 = p.add_instruction(migraph::op::add{}, l10, l11);
-    auto l13 = p.add_instruction(migraph::op::activation{"relu"}, l12);
+    auto l13 = p.add_instruction(migraph::op::relu{}, l12);
     p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l13);
     auto prog = migraph::parse_onnx("conv_relu_maxpoolX2.onnx");
@@ -118,6 +118,34 @@ void imagescaler_test()
     EXPECT(p == prog);
 }
 
+void globalavgpool_test()
+{
+    migraph::program p;
+    auto input = p.add_parameter("0", migraph::shape{migraph::shape::float_type, {1, 3, 16, 16}});
+    auto op    = migraph::op::pooling{"average"};
+    auto lens  = input->get_shape().lens();
+    op.lengths = {lens[2], lens[3]};
+    p.add_instruction(op, input);
+    auto prog = migraph::parse_onnx("globalavgpool_test.onnx");
+    EXPECT(p == prog);
+}
+
+void globalmaxpool_test()
+{
+    migraph::program p;
+    auto input = p.add_parameter("0", migraph::shape{migraph::shape::float_type, {1, 3, 16, 16}});
+    auto op    = migraph::op::pooling{"max"};
+    auto lens  = input->get_shape().lens();
+    op.lengths = {lens[2], lens[3]};
+    p.add_instruction(op, input);
+    auto prog = migraph::parse_onnx("globalmaxpool_test.onnx");
+    EXPECT(p == prog);
+}
+
 int main()
 {
     pytorch_conv_bias_test();
...
@@ -126,4 +154,6 @@ int main()
     pytorch_conv_relu_maxpool_x2();
     leaky_relu_test();
     imagescaler_test();
+    globalavgpool_test();
+    globalmaxpool_test();
 }
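The globalavgpool_test and globalmaxpool_test added above pin down how the ONNX parser is expected to lower GlobalAveragePool and GlobalMaxPool: an ordinary pooling op whose window spans the entire spatial extent. Assuming pooling's defaults of zero padding and unit stride (an assumption; the defaults are not shown in this diff), the output shape for the {1, 3, 16, 16} input works out to {1, 3, 1, 1}, since each spatial dimension reduces to (16 + 2*0 - 16)/1 + 1 = 1.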
#include <migraph/program.hpp>
#include <migraph/instruction.hpp>
#include <test.hpp>
#include <basic_ops.hpp>

void simple_alias()
{
    migraph::program p;
    auto l  = p.add_literal(1);
    auto p1 = p.add_instruction(pass_op{}, l);
    // A literal aliases itself; pass_op's output aliases its input literal
    EXPECT(bool{migraph::instruction::get_output_alias(l) == l});
    EXPECT(bool{migraph::instruction::get_output_alias(p1) == l});
}

void cascade_alias()
{
    migraph::program p;
    auto l  = p.add_literal(1);
    auto p1 = p.add_instruction(pass_op{}, l);
    auto p2 = p.add_instruction(pass_op{}, p1);
    auto p3 = p.add_instruction(pass_op{}, p2);
    // Aliases are followed through the whole chain of pass_ops back to the literal
    EXPECT(bool{migraph::instruction::get_output_alias(l) == l});
    EXPECT(bool{migraph::instruction::get_output_alias(p1) == l});
    EXPECT(bool{migraph::instruction::get_output_alias(p2) == l});
    EXPECT(bool{migraph::instruction::get_output_alias(p3) == l});
}

void no_alias()
{
    migraph::program p;
    auto x   = p.add_literal(1);
    auto y   = p.add_literal(2);
    auto sum = p.add_instruction(sum_op{}, x, y);
    // sum_op declares no alias, so the instruction is its own output alias
    EXPECT(bool{migraph::instruction::get_output_alias(sum) == sum});
}

int main()
{
    simple_alias();
    cascade_alias();
    no_alias();
}
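Taken together, these tests imply that instruction::get_output_alias follows alias links transitively until it reaches an instruction whose operation reports no alias. A hedged sketch of that behavior, assuming hypothetical accessors get_operator(), inputs(), and a to_shapes() helper (none of which are confirmed by this diff):

// Hypothetical sketch, not the repo's actual implementation:
instruction_ref get_output_alias(instruction_ref ins)
{
    int i = ins->get_operator().output_alias(to_shapes(ins->inputs())); // assumed accessors
    if(i < 0)
        return ins;                                // no alias: the instruction owns its output
    return get_output_alias(ins->inputs().at(i));  // follow the chain, as cascade_alias expects
}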
#ifndef MIGRAPH_GUARD_CONCAT_OPT_HPP
#define MIGRAPH_GUARD_CONCAT_OPT_HPP

#include <cassert>
#include <string>
#include <functional>
#include <memory>
#include <type_traits>
#include <utility>
#include <migraph/operation.hpp>
#include <migraph/operators.hpp>

namespace migraph {

struct program;

#ifdef DOXYGEN

/// An interface for target-dependent optimization of the concat instruction
struct concat_optimization
{
    /// The name of the target-dependent concat operator
    std::string name() const;
    /// The name of the target-dependent allocate operator
    std::string allocate() const;
    /// Return the target-independent concat operator
    op::concat get_concat(const operation& op) const;
};

#else

<%
interface('concat_optimization',
          virtual('name', returns='std::string', const=True),
          virtual('allocate', returns='std::string', const=True),
          virtual('get_concat', returns='op::concat', op='const operation&', const=True)
)
%>

#endif

} // namespace migraph

#endif
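For context, a target would satisfy the interface above with something along these lines. This is a hypothetical sketch: cpu_concat_optimization and the "cpu::" operator names are invented here, and the cast in get_concat assumes some mechanism exists to recover the wrapped op::concat:

// Hypothetical target-side implementation sketch:
struct cpu_concat_optimization
{
    std::string name() const { return "cpu::concat"; }       // invented operator name
    std::string allocate() const { return "cpu::allocate"; } // invented operator name
    op::concat get_concat(const operation& op) const
    {
        return any_cast<op::concat>(op); // cast mechanism assumed, not shown in this diff
    }
};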
@@ -43,6 +43,9 @@ struct operation
      * the same as the `output` shape.
      */
     argument compute(context& ctx, const shape& output, const std::vector<argument>& input) const;
+    /// An optional method to return which argument the output will alias. If
+    /// there is no aliased output, then -1 can be returned.
+    int output_alias(const std::vector<shape>& input) const;
     /// An optional stream operator to print the operation. When this is not
     /// implemented, it will just print the operation's name.
     friend std::ostream& operator<<(std::ostream& os, const operation& op);
@@ -108,10 +111,34 @@ compute_op(const T& x, context& ctx, const shape& output_shape, const std::vecto
     return compute_op(rank<1>{}, x, ctx, output_shape, input);
 }
 
+template <class T>
+int output_alias_op(rank<0>, const T&, const std::vector<shape>&)
+{
+    return -1;
+}
+
+template <class T>
+auto output_alias_op(rank<1>, const T& x, const std::vector<shape>& shapes)
+    -> decltype(x.output_alias(shapes))
+{
+    return x.output_alias(shapes);
+}
+
+template <class T>
+int output_alias_op(const T& x, const std::vector<shape>& shapes)
+{
+    return output_alias_op(rank<1>{}, x, shapes);
+}
+
 <%
 interface(
     'operation',
     virtual('name', returns = 'std::string', const = True),
+    virtual('output_alias',
+            returns = 'int',
+            input = 'const std::vector<shape>&',
+            const = True,
+            default = 'output_alias_op'),
     virtual('compute_shape', returns = 'shape', input = 'const std::vector<shape>&', const = True),
     virtual('compute',
             returns = 'argument',
...
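The three output_alias_op overloads added here form the usual rank-dispatch fallback: overload resolution prefers rank<1>, whose trailing return type SFINAEs away unless T defines output_alias, so types without the member silently fall back to the rank<0> overload returning -1. An operation opts in simply by defining the member, as in this illustrative sketch (pass_through is not an op from this diff):

// Illustrative op whose output aliases its first input:
struct pass_through
{
    std::string name() const { return "pass_through"; }
    shape compute_shape(const std::vector<shape>& inputs) const { return inputs.front(); }
    int output_alias(const std::vector<shape>&) const { return 0; } // alias input 0
};
// output_alias_op(pass_through{}, shapes) yields 0;
// for a type without the member it yields -1.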