"example/02_gemm_bilinear/README.md" did not exist on "5d37d7bff4e631c3b94112c31a52f209ca39dfe2"
Commit eb0d8fee authored by Paul's avatar Paul
Browse files

Merge branch 'develop' into driver

parents 65ef35cd 0d796941
[Eight binary TensorFlow GraphDef fixtures follow in the original commit; their protobuf payloads are not human-readable. From the op names that survive, they appear to correspond to the .pb files parsed by the tests below:

- matmul_test.pb: two Placeholder inputs feeding a MatMul with transpose_a/transpose_b set
- mul_test.pb: two Placeholder inputs feeding a Mul
- pack_test.pb and pack_test_nhwc.pb: three Placeholder inputs feeding a Pack with an axis attribute
- pooling_test.pb: one Placeholder feeding an AvgPool and a MaxPool (ksize, strides, VALID padding, NHWC data format)
- relu6_test.pb: one Placeholder feeding a Relu6
- relu_test.pb: one Placeholder feeding a Relu
- softmax_test.pb: one Placeholder feeding a Softmax]
#include <iostream>
#include <vector>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/tf.hpp>
#include "test.hpp"
TEST_CASE(add_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
    auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
    p.add_instruction(migraphx::op::add{}, l0, l1);
    auto prog = migraphx::parse_tf("add_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(add_bcast_test)
{
    migraphx::program p;
    migraphx::shape s0{migraphx::shape::float_type, {2, 3}};
    auto l0 = p.add_parameter("0", s0);
    auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {2, 1}});
    auto l2 = p.add_instruction(migraphx::op::multibroadcast{s0.lens()}, l0);
    auto l3 = p.add_instruction(migraphx::op::multibroadcast{s0.lens()}, l1);
    p.add_instruction(migraphx::op::add{}, l2, l3);
    auto prog = migraphx::parse_tf("add_bcast_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(batchnorm_test)
{
    float epsilon  = 1.001e-5f;
    float momentum = 0.9f;
    migraphx::program p;
    migraphx::op::batch_norm_inference op{
        epsilon, momentum, migraphx::op::batch_norm_inference::spatial};
    migraphx::shape s0{migraphx::shape::float_type, {32}};
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 32, 16, 16}});
    std::vector<float> const_vals(32);
    std::fill(const_vals.begin(), const_vals.end(), 1.0f);
    auto l2 = p.add_parameter("2", s0);
    auto l3 = p.add_parameter("3", s0);
    auto l4 = p.add_parameter("4", s0);
    auto l1 = p.add_literal(migraphx::literal{s0, const_vals});
    p.add_instruction(op, l0, l1, l2, l3, l4);
    auto prog = migraphx::parse_tf("batchnorm_test.pb", true);
    EXPECT(p == prog);
}

TEST_CASE(biasadd_test)
{
    migraphx::program p;
    migraphx::shape s0{migraphx::shape::float_type, {1, 500, 1, 1}};
    uint64_t axis = 1;
    auto l0 = p.add_parameter("0", s0);
    auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {500}});
    auto l2 = p.add_instruction(migraphx::op::broadcast{axis, l0->get_shape().lens()}, l1);
    p.add_instruction(migraphx::op::add{}, l0, l2);
    auto prog = migraphx::parse_tf("biasadd_test.pb", true);
    EXPECT(p == prog);
}

TEST_CASE(concat_test)
{
    migraphx::program p;
    auto l0  = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {4, 7, 3}});
    auto l1  = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {4, 2, 3}});
    int axis = 1;
    // tf uses axis as the third input, and it is in int32 format
    // add the literal using a vector in order to set stride to 1 (like in tf parser)
    p.add_literal(migraphx::shape{migraphx::shape::int32_type}, std::vector<int>{axis});
    p.add_instruction(migraphx::op::concat{static_cast<std::size_t>(axis)}, l0, l1);
    auto prog = migraphx::parse_tf("concat_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(const_test)
{
    migraphx::program p;
    p.add_literal(migraphx::shape{migraphx::shape::float_type}, std::vector<float>{1.0f});
    auto prog = migraphx::parse_tf("constant_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(conv_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    std::vector<float> weight_data(3 * 3 * 3 * 32);
    std::fill(weight_data.begin(), weight_data.end(), 1.0f);
    auto l1 =
        p.add_literal(migraphx::shape{migraphx::shape::float_type, {3, 3, 3, 32}}, weight_data);
    migraphx::op::convolution op;
    op.padding_mode = migraphx::op::padding_mode_t::same;
    op.stride       = {1, 1};
    op.dilation     = {1, 1};
    // tf stores convolution weights as HWIO; the two transposes rearrange them into OIHW
    auto l2 = p.add_instruction(migraphx::op::transpose{{0, 3, 1, 2}}, l1);
    auto l3 = p.add_instruction(migraphx::op::transpose{{1, 3, 0, 2}}, l2);
    p.add_instruction(op, l0, l3);
    auto prog = migraphx::parse_tf("conv_test.pb", true);
    EXPECT(p == prog);
}

TEST_CASE(depthwiseconv_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    std::vector<float> weight_data(3 * 3 * 3 * 1);
    std::fill(weight_data.begin(), weight_data.end(), 1.0f);
    auto l1 =
        p.add_literal(migraphx::shape{migraphx::shape::float_type, {3, 3, 3, 1}}, weight_data);
    migraphx::op::convolution op;
    op.padding_mode = migraphx::op::padding_mode_t::same;
    op.stride       = {1, 1};
    op.dilation     = {1, 1};
    op.group        = 3;
    // tf stores depthwise weights as HWCM; transpose and reshape them into the
    // {channels, 1, k, k} layout expected by a grouped convolution
    auto l2 = p.add_instruction(migraphx::op::transpose{{0, 3, 1, 2}}, l1);
    auto l3 = p.add_instruction(migraphx::op::transpose{{1, 3, 0, 2}}, l2);
    auto l4 = p.add_instruction(migraphx::op::contiguous{}, l3);
    auto l5 = p.add_instruction(migraphx::op::reshape{{3, 1, 3, 3}}, l4);
    p.add_instruction(op, l0, l5);
    auto prog = migraphx::parse_tf("depthwise_conv_test.pb", true);
    EXPECT(p == prog);
}

TEST_CASE(identity_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    p.add_instruction(migraphx::op::identity{}, l0);
    auto prog = migraphx::parse_tf("identity_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(matmul_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {8, 4}});
    auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {4, 8}});
    // the fixture sets transpose_a and transpose_b, so both inputs are transposed before the dot
    auto trans_l0 = p.add_instruction(migraphx::op::transpose{{1, 0}}, l0);
    auto trans_l1 = p.add_instruction(migraphx::op::transpose{{1, 0}}, l1);
    p.add_instruction(migraphx::op::dot{}, trans_l0, trans_l1);
    auto prog = migraphx::parse_tf("matmul_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(mean_test)
{
    migraphx::program p;
    migraphx::literal l{migraphx::shape{migraphx::shape::int32_type, {2}}, {2, 3}};
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    p.add_literal(l);
    p.add_literal(l);
    // a mean over the spatial axes is matched to average pooling over the full window
    migraphx::op::pooling op;
    op.lengths = {16, 16};
    auto l3 = p.add_instruction(op, l0);
    p.add_instruction(migraphx::op::squeeze{{2, 3}}, l3);
    p.add_instruction(op, l0);
    auto prog = migraphx::parse_tf("mean_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(mean_test_nhwc)
{
    migraphx::program p;
    migraphx::literal l{migraphx::shape{migraphx::shape::int32_type, {2}}, {1, 2}};
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    p.add_literal(l);
    p.add_literal(l);
    migraphx::op::pooling op;
    op.lengths = {16, 16};
    auto l3 = p.add_instruction(op, l0);
    p.add_instruction(migraphx::op::squeeze{{2, 3}}, l3);
    p.add_instruction(op, l0);
    auto prog = migraphx::parse_tf("mean_test_nhwc.pb", true);
    EXPECT(p == prog);
}

TEST_CASE(mul_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 16}});
    auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 16}});
    p.add_instruction(migraphx::op::mul{}, l0, l1);
    auto prog = migraphx::parse_tf("mul_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(pack_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2}});
    auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {2}});
    auto l2 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {2}});
    std::vector<migraphx::instruction_ref> args{l0, l1, l2};
    std::vector<migraphx::instruction_ref> unsqueezed_args;
    int64_t axis = 1;
    std::transform(args.begin(),
                   args.end(),
                   std::back_inserter(unsqueezed_args),
                   [&](migraphx::instruction_ref arg) {
                       return p.add_instruction(migraphx::op::unsqueeze{{axis}}, arg);
                   });
    p.add_instruction(migraphx::op::concat{static_cast<size_t>(axis)}, unsqueezed_args);
    auto prog = migraphx::parse_tf("pack_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(pack_test_nhwc)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
    auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
    auto l2 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
    std::vector<migraphx::instruction_ref> args{l0, l1, l2};
    std::vector<migraphx::instruction_ref> unsqueezed_args;
    int64_t nchw_axis = 1;
    std::transform(args.begin(),
                   args.end(),
                   std::back_inserter(unsqueezed_args),
                   [&](migraphx::instruction_ref arg) {
                       return p.add_instruction(migraphx::op::unsqueeze{{nchw_axis}}, arg);
                   });
    p.add_instruction(migraphx::op::concat{static_cast<size_t>(nchw_axis)}, unsqueezed_args);
    auto prog = migraphx::parse_tf("pack_test_nhwc.pb", true);
    EXPECT(p == prog);
}

TEST_CASE(pooling_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    migraphx::op::pooling avg_pool_op{"average"};
    migraphx::op::pooling max_pool_op{"max"};
    avg_pool_op.padding_mode = migraphx::op::padding_mode_t::valid;
    max_pool_op.padding_mode = migraphx::op::padding_mode_t::valid;
    avg_pool_op.stride       = {2, 2};
    max_pool_op.stride       = {2, 2};
    avg_pool_op.lengths      = {2, 2};
    max_pool_op.lengths      = {2, 2};
    p.add_instruction(max_pool_op, l0);
    p.add_instruction(avg_pool_op, l0);
    auto prog = migraphx::parse_tf("pooling_test.pb", true);
    EXPECT(p == prog);
}

TEST_CASE(relu_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    p.add_instruction(migraphx::op::relu{}, l0);
    auto prog = migraphx::parse_tf("relu_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(relu6_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    // Relu6 maps to a clip with max 6 and min 0
    p.add_instruction(migraphx::op::clip{6.0, 0.0}, l0);
    auto prog = migraphx::parse_tf("relu6_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(reshape_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {16}});
    migraphx::shape s0{migraphx::shape::int32_type, {4}};
    // in tf, the second arg is a literal that contains the new dimensions
    p.add_literal(migraphx::literal{s0, {1, 1, 1, 16}});
    p.add_instruction(migraphx::op::reshape{{1, 1, 1, 16}}, l0);
    auto prog = migraphx::parse_tf("reshape_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(softmax_test)
{
    migraphx::program p;
    auto l0   = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3}});
    auto dims = l0->get_shape().lens();
    // softmax is applied to a 4D reshape of the input and the result is reshaped back
    auto r = p.add_instruction(migraphx::op::reshape{{long(dims[0]), long(dims[1]), 1, 1}}, l0);
    auto s = p.add_instruction(migraphx::op::softmax{}, r);
    p.add_instruction(migraphx::op::reshape{{long(dims[0]), long(dims[1])}}, s);
    auto prog = migraphx::parse_tf("softmax_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(squeeze_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 3, 1}});
    p.add_instruction(migraphx::op::squeeze{{0, 3}}, l0);
    auto prog = migraphx::parse_tf("squeeze_test.pb", false);
    EXPECT(p == prog);
}

TEST_CASE(stridedslice_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 10, 1, 1}});
    std::size_t num_axes = 4;
    migraphx::op::slice op;
    op.starts = {0, 0, 0, 0};
    op.ends   = {1, 5, 1, 1};
    op.axes   = std::vector<int64_t>(num_axes);
    std::iota(op.axes.begin(), op.axes.end(), 0);
    // add literals for starts, ends, and strides in tf (NHWC format)
    p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{0, 0, 0, 0});
    p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{1, 1, 1, 5});
    p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{1, 1, 1, 1});
    auto l1          = p.add_instruction(op, l0);
    auto shrink_axis = 2;
    p.add_instruction(migraphx::op::squeeze{{shrink_axis}}, l1);
    auto prog = migraphx::parse_tf("stridedslice_test.pb", true);
    EXPECT(p == prog);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
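The tests above exercise migraphx::parse_tf end to end: build the expected program by hand, parse the corresponding .pb fixture (the boolean second argument marks the graph as NHWC, as the *_nhwc cases suggest), and compare. For context, a parsed program can also be compiled and run; the following is a minimal sketch, assuming the CPU target and the relu fixture above, with generate_argument used only to fabricate input data:

#include <iostream>
#include <migraphx/tf.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/generate.hpp>

int main()
{
    // Parse the GraphDef; false means the graph is not treated as NHWC.
    auto p = migraphx::parse_tf("relu_test.pb", false);
    p.compile(migraphx::cpu::target{});

    // Bind an argument for each Placeholder parameter ("0", "1", ...) and run.
    migraphx::program::parameter_map params;
    for(auto&& x : p.get_parameter_shapes())
        params[x.first] = migraphx::generate_argument(x.second);
    std::cout << p.eval(params) << std::endl;
}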
#include <iostream>
#include <vector>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/propagate_constant.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/onnx.hpp>
#include "test.hpp"
#include <migraphx/half.hpp>
TEST_CASE(param_add)
{
    auto create_program_float = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1 = p.add_parameter("x", s);
        auto p2 = p.add_parameter("y", s);
        p.add_instruction(migraphx::op::add{}, p1, p2);
        return p;
    };

    auto create_program_half = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1 = p.add_parameter("x", s);
        // quantize fences each input with a convert to half precision
        auto hp1 = p.insert_instruction(
            std::next(p1), migraphx::op::convert{migraphx::shape::half_type}, p1);
        auto p2  = p.add_parameter("y", s);
        auto hp2 = p.insert_instruction(
            std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
        auto hs = p.add_instruction(migraphx::op::add{}, hp1, hp2);
        p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hs);
        return p;
    };

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half();
        migraphx::quantize(p1);
        EXPECT(p1 == p2);
    }

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half();
        migraphx::quantize(p1, {"add"});
        EXPECT(p1 == p2);
    }
}

TEST_CASE(param_add_sub)
{
    auto create_program_float = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1   = p.add_parameter("x", s);
        auto p2   = p.add_parameter("y", s);
        auto sum  = p.add_instruction(migraphx::op::add{}, p1, p2);
        auto diff = p.add_instruction(migraphx::op::sub{}, sum, p2);
        p.add_instruction(migraphx::op::add{}, diff, p1);
        return p;
    };

    auto create_program_half_add = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1  = p.add_parameter("x", s);
        auto hp1 = p.insert_instruction(
            std::next(p1), migraphx::op::convert{migraphx::shape::half_type}, p1);
        auto p2  = p.add_parameter("y", s);
        auto hp2 = p.insert_instruction(
            std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
        auto hsum  = p.add_instruction(migraphx::op::add{}, hp1, hp2);
        auto sum   = p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hsum);
        auto diff  = p.add_instruction(migraphx::op::sub{}, sum, p2);
        auto hdiff = p.add_instruction(migraphx::op::convert{migraphx::shape::half_type}, diff);
        auto res   = p.add_instruction(migraphx::op::add{}, hdiff, hp1);
        p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, res);
        return p;
    };

    auto create_program_half_sub = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1  = p.add_parameter("x", s);
        auto p2  = p.add_parameter("y", s);
        auto hp2 = p.insert_instruction(
            std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
        auto sum   = p.add_instruction(migraphx::op::add{}, p1, p2);
        auto hsum  = p.add_instruction(migraphx::op::convert{migraphx::shape::half_type}, sum);
        auto hdiff = p.add_instruction(migraphx::op::sub{}, hsum, hp2);
        auto diff  = p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hdiff);
        p.add_instruction(migraphx::op::add{}, diff, p1);
        return p;
    };

    auto create_program_half_all = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1  = p.add_parameter("x", s);
        auto hp1 = p.insert_instruction(
            std::next(p1), migraphx::op::convert{migraphx::shape::half_type}, p1);
        auto p2  = p.add_parameter("y", s);
        auto hp2 = p.insert_instruction(
            std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
        auto hsum  = p.add_instruction(migraphx::op::add{}, hp1, hp2);
        auto hdiff = p.add_instruction(migraphx::op::sub{}, hsum, hp2);
        auto hres  = p.add_instruction(migraphx::op::add{}, hdiff, hp1);
        p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hres);
        return p;
    };

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half_add();
        migraphx::quantize(p1, {"add"});
        EXPECT(p1 == p2);
    }

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half_sub();
        migraphx::quantize(p1, {"sub"});
        EXPECT(p1 == p2);
    }

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half_all();
        migraphx::quantize(p1);
        migraphx::run_passes(p1, {migraphx::dead_code_elimination{}});
        EXPECT(p1 == p2);
    }
}

TEST_CASE(literal_add)
{
    auto create_program_float = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        std::vector<float> data(2 * 3);
        std::iota(data.begin(), data.end(), 1.0f);
        auto l1 = p.add_literal(migraphx::literal(s, data));
        auto l2 = p.add_literal(migraphx::literal(s, data));
        p.add_instruction(migraphx::op::add{}, l1, l2);
        return p;
    };

    auto create_program_half = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::half_type, {2, 3}};
        std::vector<migraphx::half> data(2 * 3);
        std::iota(data.begin(), data.end(), 1.0f);
        auto l1 = p.add_literal(migraphx::literal(s, data));
        auto l2 = p.add_literal(migraphx::literal(s, data));
        auto hs = p.add_instruction(migraphx::op::add{}, l1, l2);
        p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hs);
        return p;
    };

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half();
        migraphx::quantize(p1, {"all"});
        migraphx::run_passes(p1,
                             {migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
        migraphx::run_passes(p2,
                             {migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
        EXPECT(p1 == p2);
    }

    {
        auto p1 = create_program_float();
        auto p2 = create_program_half();
        migraphx::quantize(p1, {"add"});
        migraphx::run_passes(p1,
                             {migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
        migraphx::run_passes(p2,
                             {migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
        EXPECT(p1 == p2);
    }
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
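Taken together, these cases pin down the quantize contract: each eligible instruction is rewritten to compute in half precision, with convert instructions inserted around it, and the optional name list ({"add"}, {"sub"}, {"all"}, ...) restricts which instructions are rewritten. A minimal usage sketch under that contract, reusing only calls that appear above plus the CPU target include from this file:

#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/cpu/target.hpp>

int main()
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    auto x = p.add_parameter("x", s);
    auto y = p.add_parameter("y", s);
    p.add_instruction(migraphx::op::add{}, x, y);

    // Rewrite just the add to fp16; calling migraphx::quantize(p) with no
    // list would convert every supported instruction instead.
    migraphx::quantize(p, {"add"});
    p.compile(migraphx::cpu::target{});
}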
@@ -9,7 +9,7 @@
 #include <utility>
 #include <migraphx/operation.hpp>
-#include <migraphx/operators.hpp>
+#include <migraphx/op/concat.hpp>
 #include <migraphx/config.hpp>

 namespace migraphx {

@@ -49,7 +49,7 @@ struct operation
     argument compute(context& ctx, const shape& output, const std::vector<argument>& input) const;
     /// An optional method to return which argument the output will alias. If
     /// there is no aliased output then -1 can be returned.
-    int output_alias(const std::vector<shape>& input) const;
+    std::ptrdiff_t output_alias(const std::vector<shape>& input) const;
     /// An optional stream operator to print the operation. When this is not
     /// implemented, it will just print the operation's name.
     friend std::ostream& operator<<(std::ostream& os, const operation& op);

@@ -69,7 +69,7 @@ auto operator<<(std::ostream& os, const T& x) -> decltype(os << x.name())
 {
     os << x.name();
     char delim = '[';
-    reflect_each(x, [&](auto& y, auto name) {
+    reflect_each(x, [&](auto&& y, auto name) {
         os << delim;
         os << name << "=";
         stream_write_value(os, y);

@@ -87,6 +87,8 @@ namespace operation_equal {
 template <class T, class U>
 auto operator==(const T& x, const U& y) -> decltype(x.name() == y.name())
 {
+    static_assert(is_reflectable<T>{} or sizeof(T) <= 1,
+                  "Missing equality operator or reflect method.");
     if(x.name() != y.name())
         return false;
     const auto& yy = any_cast<T>(y);

@@ -175,7 +177,7 @@ auto is_context_free_op(const T& x) -> decltype(is_context_free_op(
 }

 template <class T>
-int output_alias_op(rank<0>, const T&, const std::vector<shape>&)
+std::ptrdiff_t output_alias_op(rank<0>, const T&, const std::vector<shape>&)
 {
     return -1;
 }

@@ -188,7 +190,7 @@ auto output_alias_op(rank<1>, const T& x, const std::vector<shape>& shapes)
 }

 template <class T>
-int output_alias_op(const T& x, const std::vector<shape>& shapes)
+std::ptrdiff_t output_alias_op(const T& x, const std::vector<shape>& shapes)
 {
     return output_alias_op(rank<1>{}, x, shapes);
 }

@@ -238,7 +240,7 @@ auto has_finalize_op(const T&) -> decltype(has_finalize_op(rank<1>{},
     virtual('is_context_free', returns = 'bool', const = True, default = 'is_context_free_op'),
     virtual('has_finalize', returns = 'bool', const = True, default = 'has_finalize_op'),
     virtual('output_alias',
-            returns = 'int',
+            returns = 'std::ptrdiff_t',
             input = 'const std::vector<shape>&',
             const = True,
             default = 'output_alias_op'),
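The move to std::ptrdiff_t makes the -1 sentinel well-typed: output_alias returns the index of the input argument that the output aliases, or -1 for none. A sketch of an op opting in, using a hypothetical pass-through op rather than anything in the library:

#include <cstddef>
#include <string>
#include <vector>
#include <migraphx/shape.hpp>

// Hypothetical op whose output is its first input; returning 0 tells the
// interface that the output aliases argument 0, overriding the rank<0>
// default of -1 supplied by output_alias_op above.
struct pass_through
{
    std::string name() const { return "pass_through"; }
    migraphx::shape compute_shape(std::vector<migraphx::shape> inputs) const
    {
        return inputs.front();
    }
    std::ptrdiff_t output_alias(const std::vector<migraphx::shape>&) const { return 0; }
};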
#ifndef MIGRAPHX_GUARD_SCHEDULE_MODEL_HPP
#define MIGRAPHX_GUARD_SCHEDULE_MODEL_HPP
#include <cassert>
#include <string>
#include <functional>
#include <memory>
#include <type_traits>
#include <utility>
#include <migraphx/config.hpp>
#include <migraphx/instruction_ref.hpp>
#include <vector>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
struct program;
struct operation;
#ifdef DOXYGEN

/// An interface for the target-dependent model used by the scheduler
struct schedule_model
{
    /// Get the number of concurrent instructions allowed
    std::size_t concurrency() const;
    /// Schedule a concurrent instruction
    void sched(program& p, instruction_ref ins, std::size_t n) const;
    /// Insert necessary waits before an instruction
    void wait(program& p, instruction_ref ins, std::size_t wait_id) const;
    /// Insert necessary records after an instruction
    void record(program& p, instruction_ref ins, std::size_t wait_id) const;
    /// Compute the weight for an operation
    std::size_t weight(const operation& op) const;
};

#else

<%
interface('schedule_model',
          virtual('concurrency', returns='std::size_t', const=True),
          virtual('sched', p='program&', ins='instruction_ref', n='std::size_t', const=True),
          virtual('wait', p='program&', ins='instruction_ref', wait_id='std::size_t', const=True),
          virtual('record', p='program&', ins='instruction_ref', wait_id='std::size_t', const=True),
          virtual('weight', returns='std::size_t', op='const operation&', const=True)
)
%>

#endif
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
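For illustration, a model satisfying the five virtuals above could be as small as a purely sequential target: one stream, no synchronization, uniform weights. A minimal sketch (the include path is assumed from this header's guard; nothing here is a real MIGraphX target):

#include <cstddef>
#include <migraphx/operation.hpp>
#include <migraphx/schedule_model.hpp>

struct sequential_model
{
    // Only one instruction runs at a time.
    std::size_t concurrency() const { return 1; }
    // With a single stream there is nothing to schedule onto...
    void sched(migraphx::program&, migraphx::instruction_ref, std::size_t) const {}
    // ...and no cross-stream waits or records to insert.
    void wait(migraphx::program&, migraphx::instruction_ref, std::size_t) const {}
    void record(migraphx::program&, migraphx::instruction_ref, std::size_t) const {}
    // Every operation is treated as equally expensive.
    std::size_t weight(const migraphx::operation&) const { return 1; }
};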
@@ -22,10 +22,8 @@ struct target
 {
     /// A unique name used to identify the target
     std::string name() const;
-    /// The transformation passes to be run
     /**
      * @brief The transformation pass to be run during compilation.
-     * @details [long description]
      *
      * @param ctx This is the target-dependent context that is created by `get_context`
      * @return The passes to be ran
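Since the doc comment above is the whole contract for get_passes, a toy target showing just that hook might look like the following sketch; the real target interface has additional members (get_context and allocation hooks) omitted here, and toy_target is an invented name:

#include <string>
#include <vector>
#include <migraphx/pass.hpp>
#include <migraphx/context.hpp>
#include <migraphx/dead_code_elimination.hpp>

// Sketch only: the passes returned here run in order during program::compile.
struct toy_target
{
    std::string name() const { return "toy"; }
    std::vector<migraphx::pass> get_passes(migraphx::context&) const
    {
        return {migraphx::dead_code_elimination{}};
    }
};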
 import string, sys, re, os

+trivial = [
+    'std::size_t',
+    'instruction_ref'
+]

 headers = '''
 #include <algorithm>
 #include <cassert>

@@ -286,7 +292,7 @@ def convert_member(d, struct_name):
         member['this'] = x
         if 'const' in t:
             member['member_const'] = 'const'
-        if t.endswith(('&', '*')):
+        if t.endswith(('&', '*')) or t in trivial:
             if use_member: member_args.append(x)
             args.append(arg_name)
         else:
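The effect of this change: parameter types ending in & or *, and now the cheap value types listed in trivial, are forwarded directly through the generated type-erased wrapper, presumably motivated by the schedule_model interface above whose virtuals take std::size_t. A self-contained C++ sketch of the shape of code the generator emits (all names here are invented stand-ins, not te.py's actual output):

#include <cstddef>
#include <memory>

// Stand-ins for the real migraphx types, purely for illustration.
struct program
{
};
using instruction_ref = int;

struct schedule_model_interface
{
    virtual void sched(program& p, instruction_ref ins, std::size_t n) const = 0;
    virtual ~schedule_model_interface() = default;
};

// Because std::size_t is in the 'trivial' list, n is accepted and forwarded
// by value, exactly like reference and pointer parameters.
struct schedule_model
{
    void sched(program& p, instruction_ref ins, std::size_t n) const { handle->sched(p, ins, n); }
    std::shared_ptr<const schedule_model_interface> handle;
};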