Commit d3987641 authored by Scott Thornton

Moved operators into an op namespace

parent f9f4f713
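The commit is one mechanical refactor applied everywhere: each operator struct moves from namespace `migraph` into the nested namespace `migraph::op`, so every construction site gains an `op::` (or, outside the library, `migraph::op::`) qualifier while the operator bodies are untouched. A minimal sketch of the pattern, with an elided struct body rather than the real MIGraphX declaration:

```cpp
// Before: operator structs live directly in the migraph namespace.
namespace migraph {
struct contiguous { /* name(), compute_shape(), compute() elided */ };
} // namespace migraph

// After: the same structs are wrapped in a nested op namespace.
namespace migraph {
namespace op {
struct contiguous { /* body unchanged */ };
} // namespace op
} // namespace migraph

// Call sites inside namespace migraph change only their qualifier:
//   old: p.insert_instruction(std::next(ins), contiguous{}, ins);
//   new: p.insert_instruction(std::next(ins), op::contiguous{}, ins);
```

Code outside `migraph` (the tests below) spells out the full `migraph::op::` path. In the hunks that follow, removed lines are marked `-` and added lines `+`.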
@@ -13,7 +13,7 @@ void auto_contiguous::apply(program& p) const
shape s = ins->get_shape();
if(not s.standard())
{
- auto c = p.insert_instruction(std::next(ins), contiguous{}, ins);
+ auto c = p.insert_instruction(std::next(ins), op::contiguous{}, ins);
p.replace_instruction(ins, c);
}
}
@@ -27,7 +27,7 @@ void eliminate_allocation::apply(program& p) const
auto ins = pp.first;
auto s = ins->get_shape();
auto offset = pp.second;
- p.replace_instruction(ins, load{s, offset}, mem);
+ p.replace_instruction(ins, op::load{s, offset}, mem);
}
}
} // namespace migraph
@@ -28,7 +28,7 @@ void fwd_conv_batchnorm_rewrite::apply(program& p) const
const auto& mean = ins->inputs()[3]->get_literal();
const auto& variance = ins->inputs()[4]->get_literal();
// Get epsilon
- auto bn_op = any_cast<batch_norm_inference>(ins->get_operator());
+ auto bn_op = any_cast<op::batch_norm_inference>(ins->get_operator());
auto epsilon = bn_op.epsilon;
// Get convolution weights
const auto& weights = conv_ins->inputs()[1]->get_literal();
@@ -59,8 +59,8 @@ void fwd_conv_batchnorm_rewrite::apply(program& p) const
auto l_weights = p.add_literal({weights.get_shape(), new_weights.data()});
auto l_bias = p.add_literal({new_bias.get_shape(), new_bias.data()});
auto c = p.replace_instruction(conv_ins, conv_op, {conv_ins->inputs()[0], l_weights});
- auto b = p.insert_instruction(ins, broadcast{1}, c, l_bias);
- p.replace_instruction(ins, add{}, {c, b});
+ auto b = p.insert_instruction(ins, op::broadcast{1}, c, l_bias);
+ p.replace_instruction(ins, op::add{}, {c, b});
}
}
} // namespace migraph
@@ -10,6 +10,7 @@
#include <utility>
namespace migraph {
+ namespace op {
struct not_computable
{
@@ -571,6 +572,7 @@ struct outline
}
};
+ } // namespace op
} // namespace migraph
#endif
@@ -48,13 +48,13 @@ struct onnx_parser
onnx_parser()
{
add_generic_op("Add", add{});
add_generic_op("Div", div{});
add_generic_op("MatMul", gemm{});
add_generic_op("Mul", mul{});
add_generic_op("Relu", activation{"relu"});
add_generic_op("Sub", sub{});
add_generic_op("Sum", add{});
add_generic_op("Add", op::add{});
add_generic_op("Div", op::div{});
add_generic_op("MatMul", op::gemm{});
add_generic_op("Mul", op::mul{});
add_generic_op("Relu", op::activation{"relu"});
add_generic_op("Sub", op::sub{});
add_generic_op("Sum", op::add{});
add_mem_op("Constant", &onnx_parser::parse_constant);
add_mem_op("Conv", &onnx_parser::parse_conv);
@@ -93,7 +93,7 @@ struct onnx_parser
uint64_t axis = (contains(attributes, "axis"))
? parse_value(attributes.at("axis")).at<uint64_t>()
: 0;
- auto l = prog.add_instruction(broadcast{axis}, args);
+ auto l = prog.add_instruction(op::broadcast{axis}, args);
return prog.add_instruction(x, args[0], l);
}
}
@@ -105,15 +105,15 @@ struct onnx_parser
parse_softmax(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
{
auto dims = args.front()->get_shape().lens();
- auto r = prog.add_instruction(reshape{{long(dims[0]), long(dims[1]), 1, 1}}, args.front());
- auto s = prog.add_instruction(softmax{}, r);
- return prog.add_instruction(reshape{{long(dims[0]), long(dims[1])}}, s);
+ auto r = prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1]), 1, 1}}, args.front());
+ auto s = prog.add_instruction(op::softmax{}, r);
+ return prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1])}}, s);
}
instruction_ref
parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
- convolution op;
+ op::convolution op;
if(contains(attributes, "pads"))
{
copy(attributes["pads"].ints(), op.padding.begin());
@@ -130,8 +130,8 @@ struct onnx_parser
{
uint64_t axis = 1;
auto l1 = prog.add_instruction(op, args[0], args[1]);
- auto l2 = prog.add_instruction(broadcast{axis}, l1, args[2]);
- return prog.add_instruction(add{}, l1, l2);
+ auto l2 = prog.add_instruction(op::broadcast{axis}, l1, args[2]);
+ return prog.add_instruction(op::add{}, l1, l2);
}
return prog.add_instruction(op, args);
}
@@ -140,7 +140,7 @@ struct onnx_parser
attribute_map attributes,
std::vector<instruction_ref> args)
{
- pooling op{name == "MaxPool" ? "max" : "average"};
+ op::pooling op{name == "MaxPool" ? "max" : "average"};
if(contains(attributes, "pads"))
{
copy(attributes["pads"].ints(), op.padding.begin());
@@ -159,7 +159,7 @@ struct onnx_parser
instruction_ref
parse_reshape(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
- reshape op;
+ op::reshape op;
if(args.size() == 1)
{
literal s = parse_value(attributes.at("shape"));
@@ -181,7 +181,7 @@ struct onnx_parser
{
axis = parse_value(attributes.at("axis")).at<int>();
}
- return prog.add_instruction(flatten{axis}, args[0]);
+ return prog.add_instruction(op::flatten{axis}, args[0]);
}
instruction_ref parse_constant(const std::string&,
@@ -216,16 +216,16 @@ struct onnx_parser
transb = parse_value(attributes.at("transB")).at<bool>();
}
std::vector<int64_t> perm = {1, 0};
- auto l1 = (transa) ? prog.add_instruction(transpose{perm}, args[0]) : args[0];
- auto l2 = (transb) ? prog.add_instruction(transpose{perm}, args[1]) : args[1];
+ auto l1 = (transa) ? prog.add_instruction(op::transpose{perm}, args[0]) : args[0];
+ auto l2 = (transb) ? prog.add_instruction(op::transpose{perm}, args[1]) : args[1];
if(args.size() == 3)
{
uint64_t axis = 1;
- auto l3 = prog.add_instruction(gemm{alpha, beta}, l1, l2);
- auto l4 = prog.add_instruction(broadcast{axis}, l3, args[2]);
- return prog.add_instruction(add{}, l3, l4);
+ auto l3 = prog.add_instruction(op::gemm{alpha, beta}, l1, l2);
+ auto l4 = prog.add_instruction(op::broadcast{axis}, l3, args[2]);
+ return prog.add_instruction(op::add{}, l3, l4);
}
- return prog.add_instruction(gemm{alpha, beta}, l1, l2);
+ return prog.add_instruction(op::gemm{alpha, beta}, l1, l2);
}
instruction_ref
@@ -233,7 +233,7 @@ struct onnx_parser
{
float epsilon = 1e-5f;
float momentum = 0.9f;
- batch_norm_inference::bn_infer_mode_t bn_mode = batch_norm_inference::spatial;
+ op::batch_norm_inference::bn_infer_mode_t bn_mode = op::batch_norm_inference::spatial;
bool is_test = false;
if(contains(attributes, "epsilon"))
{
@@ -250,10 +250,10 @@ struct onnx_parser
if(contains(attributes, "spatial"))
{
bn_mode = (parse_value(attributes.at("spatial")).at<uint64_t>() > 0)
- ? batch_norm_inference::spatial
- : batch_norm_inference::per_activation;
+ ? op::batch_norm_inference::spatial
+ : op::batch_norm_inference::per_activation;
}
- batch_norm_inference op{epsilon, momentum, bn_mode, is_test};
+ op::batch_norm_inference op{epsilon, momentum, bn_mode, is_test};
return prog.add_instruction(op, std::move(args));
}
@@ -36,7 +36,7 @@ T zero(const T&)
//
struct cpu_batch_norm_inference
{
- batch_norm_inference op;
+ op::batch_norm_inference op;
std::string name() const { return "cpu::batch_norm_inference"; }
@@ -58,7 +58,7 @@ struct cpu_batch_norm_inference
auto image_height = output_shape.lens()[2];
auto image_width = output_shape.lens()[3];
- if(op.bn_mode == batch_norm_inference::spatial)
+ if(op.bn_mode == op::batch_norm_inference::spatial)
{
visit_all(output, input, mini_batch_mean, mini_batch_variance, arg_gamma, arg_bias)(
[&](auto result, auto buffer, auto mean, auto variance, auto gamma, auto bias) {
@@ -73,7 +73,7 @@ struct cpu_batch_norm_inference
});
}
- if(op.bn_mode == batch_norm_inference::per_activation)
+ if(op.bn_mode == op::batch_norm_inference::per_activation)
{
visit_all(output, input, mini_batch_mean, mini_batch_mean, arg_gamma, arg_bias)(
[&](auto result, auto buffer, auto mean, auto variance, auto gamma, auto bias) {
@@ -95,7 +95,7 @@ struct cpu_batch_norm_inference
struct cpu_convolution
{
- convolution op;
+ op::convolution op;
std::string name() const { return "cpu::convolution"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
@@ -136,7 +136,7 @@ struct cpu_convolution
struct cpu_im2col
{
- im2col op;
+ op::im2col op;
static std::string name() { return "cpu::im2col"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
@@ -218,7 +218,7 @@ struct avg_pool
template <class Op>
struct cpu_pooling
{
- pooling op;
+ op::pooling op;
std::string name() const { return "cpu::pooling_" + Op::name(); }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
@@ -266,7 +266,7 @@ struct cpu_pooling
struct cpu_contiguous
{
- contiguous op;
+ op::contiguous op;
std::string name() const { return "cpu::contiguous"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
@@ -284,7 +284,7 @@ struct cpu_contiguous
struct cpu_gemm
{
- gemm op;
+ op::gemm op;
std::string name() const { return "cpu::gemm"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
@@ -551,12 +551,12 @@ struct cpu_apply
void init()
{
apply_map["im2col"] = extend_op<cpu_im2col, im2col>();
apply_map["convolution"] = extend_op<cpu_convolution, convolution>();
apply_map["gemm"] = extend_op<cpu_gemm, gemm>();
apply_map["im2col"] = extend_op<cpu_im2col, op::im2col>();
apply_map["convolution"] = extend_op<cpu_convolution, op::convolution>();
apply_map["gemm"] = extend_op<cpu_gemm, op::gemm>();
apply_map["batch_norm_inference"] =
extend_op<cpu_batch_norm_inference, batch_norm_inference>();
apply_map["contiguous"] = extend_op<cpu_contiguous, contiguous>();
extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
apply_map["identity"] = simple_op<cpu_unary<identity_op>>();
apply_map["tanh"] = simple_op<cpu_unary<tanh_op>>();
@@ -609,14 +609,14 @@ struct cpu_apply
void apply_activation(instruction_ref ins)
{
auto&& op = any_cast<activation>(ins->get_operator());
auto&& op = any_cast<op::activation>(ins->get_operator());
if(op.mode == "relu")
prog->replace_instruction(ins, cpu_unary<relu_op>{}, ins->inputs());
}
void apply_pooling(instruction_ref ins)
{
auto&& op = any_cast<pooling>(ins->get_operator());
auto&& op = any_cast<op::pooling>(ins->get_operator());
if(op.mode == "max")
prog->replace_instruction(ins, cpu_pooling<max_pool>{op}, ins->inputs());
else if(op.mode == "average")
@@ -41,7 +41,7 @@ void after_literal_transpose()
auto l = p.add_literal(get_2x2());
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().transposed());
- auto t = p.add_instruction(migraph::transpose{{1, 0}}, l);
+ auto t = p.add_instruction(migraph::op::transpose{{1, 0}}, l);
p.add_instruction(pass_op{}, t);
EXPECT(not p.get_shape().standard());
EXPECT(p.get_shape().transposed());
@@ -57,7 +57,7 @@ void after_literal_broadcast()
auto l2 = p.add_literal(get_2());
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().broadcasted());
- auto b = p.add_instruction(migraph::broadcast{}, l1, l2);
+ auto b = p.add_instruction(migraph::op::broadcast{}, l1, l2);
p.add_instruction(pass_op{}, b);
EXPECT(not p.get_shape().standard());
EXPECT(p.get_shape().broadcasted());
@@ -72,7 +72,7 @@ void after_param_transpose()
auto l = p.add_parameter("2x2", {migraph::shape::float_type, {2, 2}});
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().transposed());
- auto t = p.add_instruction(migraph::transpose{{1, 0}}, l);
+ auto t = p.add_instruction(migraph::op::transpose{{1, 0}}, l);
p.add_instruction(pass_op{}, t);
EXPECT(not p.get_shape().standard());
EXPECT(p.get_shape().transposed());
@@ -88,7 +88,7 @@ void after_param_broadcast()
auto l2 = p.add_parameter("2", {migraph::shape::float_type, {2}});
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().broadcasted());
- auto b = p.add_instruction(migraph::broadcast{}, l1, l2);
+ auto b = p.add_instruction(migraph::op::broadcast{}, l1, l2);
p.add_instruction(pass_op{}, b);
EXPECT(not p.get_shape().standard());
EXPECT(p.get_shape().broadcasted());
@@ -24,7 +24,7 @@ void im2col_3x3_no_pad_identity_test()
migraph::shape s_weights{migraph::shape::int32_type, {1, channels, f[0], f[1]}};
auto l_image = p.add_literal(migraph::literal{s_image, input});
auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
- p.add_instruction(migraph::im2col{padding, stride, dilation}, l_image, l_weights);
+ p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -53,7 +53,7 @@ void im2col_3x3_no_pad_test()
migraph::shape s_weights{migraph::shape::int32_type, {1, channels, f[0], f[1]}};
auto l_image = p.add_literal(migraph::literal{s_image, input});
auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
- p.add_instruction(migraph::im2col{padding, stride, dilation}, l_image, l_weights);
+ p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -85,7 +85,7 @@ void im2col_3x3_stride_2_no_pad_test()
migraph::shape s_weights{migraph::shape::int32_type, {1, channels, f[0], f[1]}};
auto l_image = p.add_literal(migraph::literal{s_image, input});
auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
- p.add_instruction(migraph::im2col{padding, stride, dilation}, l_image, l_weights);
+ p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -118,7 +118,7 @@ void im2col_3x3_with_padding_test()
migraph::shape s_weights{migraph::shape::int32_type, {1, channels, f[0], f[1]}};
auto l_image = p.add_literal(migraph::literal{s_image, input});
auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
- p.add_instruction(migraph::im2col{padding, stride, dilation}, l_image, l_weights);
+ p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -160,7 +160,7 @@ void batch_norm_inference_test()
auto mean = p.add_literal(migraph::literal{vars, mean_data});
auto variance = p.add_literal(migraph::literal{vars, variance_data});
- p.add_instruction(migraph::batch_norm_inference{}, x, scale, bias, mean, variance);
+ p.add_instruction(migraph::op::batch_norm_inference{}, x, scale, bias, mean, variance);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -190,7 +190,7 @@ void im2col_3x3_with_channels_identity_test()
migraph::shape s_weights{migraph::shape::int32_type, {1, channels, f[0], f[1]}};
auto l_image = p.add_literal(migraph::literal{s_image, input});
auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
- p.add_instruction(migraph::im2col{padding, stride, dilation}, l_image, l_weights);
+ p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -206,7 +206,7 @@ void exp_test()
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
- p.add_instruction(migraph::exp{}, l);
+ p.add_instruction(migraph::op::exp{}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -220,7 +220,7 @@ void sin_test()
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
- p.add_instruction(migraph::sin{}, l);
+ p.add_instruction(migraph::op::sin{}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -234,7 +234,7 @@ void cos_test()
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
- p.add_instruction(migraph::cos{}, l);
+ p.add_instruction(migraph::op::cos{}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -248,7 +248,7 @@ void tan_test()
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
- p.add_instruction(migraph::tan{}, l);
+ p.add_instruction(migraph::op::tan{}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -263,7 +263,7 @@ void add_test()
migraph::shape s{migraph::shape::float_type, {3}};
auto l1 = p.add_literal(migraph::literal{s, {-1, 0, 1}});
auto l2 = p.add_literal(migraph::literal{s, {1, 2, 3}});
- p.add_instruction(migraph::add{}, l1, l2);
+ p.add_instruction(migraph::op::add{}, l1, l2);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -282,7 +282,7 @@ void broadcast_test()
uint64_t axis = 0;
auto l1 = p.add_literal(migraph::literal{a_shape, a_data});
auto l2 = p.add_literal(migraph::literal{b_shape, b_data});
- p.add_instruction(migraph::broadcast{axis}, l1, l2);
+ p.add_instruction(migraph::op::broadcast{axis}, l1, l2);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
auto output = result.get<int32_t>();
@@ -301,8 +301,8 @@ void add_broadcast_test()
uint64_t axis = 0;
auto l1 = p.add_literal(migraph::literal{a_shape, a_data});
auto l2 = p.add_literal(migraph::literal{b_shape, b_data});
- auto l3 = p.add_instruction(migraph::broadcast{axis}, l1, l2);
- p.add_instruction(migraph::add{}, l1, l3);
+ auto l3 = p.add_instruction(migraph::op::broadcast{axis}, l1, l2);
+ p.add_instruction(migraph::op::add{}, l1, l3);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
EXPECT(result.get_shape().packed());
@@ -318,7 +318,7 @@ void sub_test()
migraph::shape s{migraph::shape::float_type, {3}};
auto l1 = p.add_literal(migraph::literal{s, {-1, 0, 1}});
auto l2 = p.add_literal(migraph::literal{s, {1, 2, 3}});
- p.add_instruction(migraph::sub{}, l1, l2);
+ p.add_instruction(migraph::op::sub{}, l1, l2);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -333,7 +333,7 @@ void mul_test()
migraph::shape s{migraph::shape::float_type, {3}};
auto l1 = p.add_literal(migraph::literal{s, {-1, 0, 1}});
auto l2 = p.add_literal(migraph::literal{s, {1, 2, 3}});
- p.add_instruction(migraph::mul{}, l1, l2);
+ p.add_instruction(migraph::op::mul{}, l1, l2);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -348,7 +348,7 @@ void div_test()
migraph::shape s{migraph::shape::float_type, {3}};
auto l1 = p.add_literal(migraph::literal{s, {-1.0f, 0.5f, 1.0f}});
auto l2 = p.add_literal(migraph::literal{s, {1.0f, 2.0f, 4.0f}});
- p.add_instruction(migraph::div{}, l1, l2);
+ p.add_instruction(migraph::op::div{}, l1, l2);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -366,7 +366,7 @@ void reshape_test()
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
std::vector<int64_t> new_shape = {8, 3, 1, 1};
- p.add_instruction(migraph::reshape{new_shape}, l);
+ p.add_instruction(migraph::op::reshape{new_shape}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -377,7 +377,7 @@ void reshape_test()
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
std::vector<int64_t> new_shape = {1, 3, 4, 2};
- p.add_instruction(migraph::reshape{new_shape}, l);
+ p.add_instruction(migraph::op::reshape{new_shape}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -388,7 +388,7 @@ void reshape_test()
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
std::vector<int64_t> new_shape = {1, 3, 4, 2};
- p.add_instruction(migraph::reshape{new_shape}, l);
+ p.add_instruction(migraph::op::reshape{new_shape}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
@@ -436,7 +436,7 @@ void gemm_test()
auto al = p.add_literal(migraph::literal{a_shape, a});
migraph::shape b_shape{migraph::shape::get_type<T>{}, {5, 3}};
auto bl = p.add_literal(migraph::literal{b_shape, b});
- p.add_instruction(migraph::gemm{}, al, bl);
+ p.add_instruction(migraph::op::gemm{}, al, bl);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<T> results_vector(12);
@@ -491,7 +491,7 @@ void maxpool_test()
0.52119428, 2.07681108, 0.88494766, 1.51522756, 0.54275119, 0.6629802};
migraph::shape a_shape{migraph::shape::float_type, {2, 3, 6, 6}};
auto al = p.add_literal(migraph::literal{a_shape, a});
- p.add_instruction(migraph::pooling{"max", {{0, 0}}, {{2, 2}}, {{3, 2}}}, al);
+ p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{3, 2}}}, al);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::cout << result.get_shape() << std::endl;
@@ -556,7 +556,7 @@ void softmax_test()
migraph::shape a_shape{migraph::shape::float_type, {5, 3, 4, 2}};
auto al = p.add_literal(migraph::literal{a_shape, a});
- p.add_instruction(migraph::softmax{}, al);
+ p.add_instruction(migraph::op::softmax{}, al);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(120);
@@ -618,7 +618,7 @@ void conv2d_test()
migraph::shape c_shape{migraph::shape::float_type, {2, 3, 3, 3}};
auto cl = p.add_literal(migraph::literal{c_shape, c});
- p.add_instruction(migraph::convolution{}, al, cl);
+ p.add_instruction(migraph::op::convolution{}, al, cl);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -674,7 +674,7 @@ void conv2d_padding_test()
migraph::shape c_shape{migraph::shape::float_type, {2, 3, 3, 3}};
auto cl = p.add_literal(migraph::literal{c_shape, c});
- p.add_instruction(migraph::convolution{{{1, 1}}, {{1, 1}}}, al, cl);
+ p.add_instruction(migraph::op::convolution{{{1, 1}}, {{1, 1}}}, al, cl);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -735,7 +735,7 @@ void conv2d_padding_stride_test()
migraph::shape c_shape{migraph::shape::float_type, {2, 3, 3, 3}};
auto cl = p.add_literal(migraph::literal{c_shape, c});
- p.add_instruction(migraph::convolution{{{1, 1}}, {{2, 2}}}, al, cl);
+ p.add_instruction(migraph::op::convolution{{{1, 1}}, {{2, 2}}}, al, cl);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -754,7 +754,7 @@ void transpose_test()
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
std::vector<int64_t> perm = {0, 3, 1, 2};
- p.add_instruction(migraph::transpose{perm}, l);
+ p.add_instruction(migraph::op::transpose{perm}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -767,8 +767,8 @@ void transpose_test()
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
std::vector<int64_t> perm = {0, 3, 1, 2};
- auto result = p.add_instruction(migraph::transpose{perm}, l);
- p.add_instruction(migraph::contiguous{}, result);
+ auto result = p.add_instruction(migraph::op::transpose{perm}, l);
+ p.add_instruction(migraph::op::contiguous{}, result);
p.compile(migraph::cpu::cpu_target{});
auto result2 = p.eval({});
@@ -787,7 +787,7 @@ void contiguous_test()
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
- p.add_instruction(migraph::contiguous{}, l);
+ p.add_instruction(migraph::op::contiguous{}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
@@ -18,8 +18,8 @@ void standard_op()
{
migraph::program p;
auto l = p.add_literal(get_2x2());
- auto t = p.add_instruction(migraph::transpose{{1, 0}}, l);
- auto c = p.add_instruction(migraph::contiguous{}, t);
+ auto t = p.add_instruction(migraph::op::transpose{{1, 0}}, l);
+ auto c = p.add_instruction(migraph::op::contiguous{}, t);
p.add_instruction(pass_standard_op{}, c);
auto count = std::distance(p.begin(), p.end());
p.compile(eliminate_contiguous_target{});
@@ -30,8 +30,8 @@ void non_standard_op()
{
migraph::program p;
auto l = p.add_literal(get_2x2());
- auto t = p.add_instruction(migraph::transpose{{1, 0}}, l);
- auto c = p.add_instruction(migraph::contiguous{}, t);
+ auto t = p.add_instruction(migraph::op::transpose{{1, 0}}, l);
+ auto c = p.add_instruction(migraph::op::contiguous{}, t);
p.add_instruction(pass_op{}, c);
auto count = std::distance(p.begin(), p.end());
p.compile(eliminate_contiguous_target{});
@@ -13,9 +13,9 @@ void pytorch_conv_bias_test()
auto l1 = p.add_parameter("1", {migraph::shape::float_type, {1, 3, 5, 5}});
auto l2 = p.add_parameter("2", {migraph::shape::float_type, {1}});
uint64_t axis = 1;
- auto l3 = p.add_instruction(migraph::convolution{}, l0, l1);
- auto l4 = p.add_instruction(migraph::broadcast{axis}, l3, l2);
- p.add_instruction(migraph::add{}, l3, l4);
+ auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
+ auto l4 = p.add_instruction(migraph::op::broadcast{axis}, l3, l2);
+ p.add_instruction(migraph::op::add{}, l3, l4);
auto prog = migraph::parse_onnx("conv.onnx");
EXPECT(p == prog);
@@ -28,11 +28,11 @@ void pytorch_conv_relu_maxpool()
auto l1 = p.add_parameter("1", {migraph::shape::float_type, {1, 3, 5, 5}});
auto l2 = p.add_parameter("2", {migraph::shape::float_type, {1}});
uint64_t axis = 1;
- auto l3 = p.add_instruction(migraph::convolution{}, l0, l1);
- auto l4 = p.add_instruction(migraph::broadcast{axis}, l3, l2);
- auto l5 = p.add_instruction(migraph::add{}, l3, l4);
- auto l6 = p.add_instruction(migraph::activation{"relu"}, l5);
- p.add_instruction(migraph::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
+ auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
+ auto l4 = p.add_instruction(migraph::op::broadcast{axis}, l3, l2);
+ auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
+ auto l6 = p.add_instruction(migraph::op::activation{"relu"}, l5);
+ p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
auto prog = migraph::parse_onnx("conv_relu_maxpool.onnx");
EXPECT(p == prog);
@@ -50,12 +50,12 @@ void pytorch_conv_bn_relu_maxpool()
auto p5 = p.add_parameter("5", {migraph::shape::float_type, {1}});
auto p6 = p.add_parameter("6", {migraph::shape::float_type, {1}});
uint64_t axis = 1;
- auto l3 = p.add_instruction(migraph::convolution{}, l0, l1);
- auto l4 = p.add_instruction(migraph::broadcast{axis}, l3, l2);
- auto l5 = p.add_instruction(migraph::add{}, l3, l4);
- auto l6 = p.add_instruction(migraph::batch_norm_inference{}, l5, p3, p4, p5, p6);
- auto l7 = p.add_instruction(migraph::activation{"relu"}, l6);
- p.add_instruction(migraph::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l7);
+ auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
+ auto l4 = p.add_instruction(migraph::op::broadcast{axis}, l3, l2);
+ auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
+ auto l6 = p.add_instruction(migraph::op::batch_norm_inference{}, l5, p3, p4, p5, p6);
+ auto l7 = p.add_instruction(migraph::op::activation{"relu"}, l6);
+ p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l7);
auto prog = migraph::parse_onnx("conv_bn_relu_maxpool.onnx");
EXPECT(p == prog);
@@ -68,19 +68,19 @@ void pytorch_conv_relu_maxpool_x2()
auto l1 = p.add_parameter("1", {migraph::shape::float_type, {5, 3, 5, 5}});
auto l2 = p.add_parameter("2", {migraph::shape::float_type, {5}});
uint64_t axis = 1;
- auto l3 = p.add_instruction(migraph::convolution{}, l0, l1);
- auto l4 = p.add_instruction(migraph::broadcast{axis}, l3, l2);
- auto l5 = p.add_instruction(migraph::add{}, l3, l4);
- auto l6 = p.add_instruction(migraph::activation{"relu"}, l5);
- auto l7 = p.add_instruction(migraph::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
+ auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
+ auto l4 = p.add_instruction(migraph::op::broadcast{axis}, l3, l2);
+ auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
+ auto l6 = p.add_instruction(migraph::op::activation{"relu"}, l5);
+ auto l7 = p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
auto l8 = p.add_parameter("3", {migraph::shape::float_type, {1, 5, 5, 5}});
auto l9 = p.add_parameter("4", {migraph::shape::float_type, {1}});
- auto l10 = p.add_instruction(migraph::convolution{}, l7, l8);
- auto l11 = p.add_instruction(migraph::broadcast{axis}, l10, l9);
- auto l12 = p.add_instruction(migraph::add{}, l10, l11);
- auto l13 = p.add_instruction(migraph::activation{"relu"}, l12);
- p.add_instruction(migraph::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l13);
+ auto l10 = p.add_instruction(migraph::op::convolution{}, l7, l8);
+ auto l11 = p.add_instruction(migraph::op::broadcast{axis}, l10, l9);
+ auto l12 = p.add_instruction(migraph::op::add{}, l10, l11);
+ auto l13 = p.add_instruction(migraph::op::activation{"relu"}, l12);
+ p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l13);
auto prog = migraph::parse_onnx("conv_relu_maxpoolX2.onnx");
@@ -57,9 +57,9 @@ void batch_norm_inference_shape()
const size_t channels = 3;
migraph::shape s{migraph::shape::float_type, {4, channels, 3, 3}};
migraph::shape vars{migraph::shape::float_type, {channels}};
- expect_shape(s, migraph::batch_norm_inference{}, s, vars, vars, vars, vars);
- throws_shape(migraph::batch_norm_inference{}, s);
- throws_shape(migraph::batch_norm_inference{}, s, vars, vars, vars, vars, vars);
+ expect_shape(s, migraph::op::batch_norm_inference{}, s, vars, vars, vars, vars);
+ throws_shape(migraph::op::batch_norm_inference{}, s);
+ throws_shape(migraph::op::batch_norm_inference{}, s, vars, vars, vars, vars, vars);
}
void convolution_shape()
@@ -67,33 +67,33 @@
migraph::shape output{migraph::shape::float_type, {4, 4, 1, 1}};
migraph::shape input{migraph::shape::float_type, {4, 3, 3, 3}};
migraph::shape weights{migraph::shape::float_type, {4, 3, 3, 3}};
- expect_shape(output, migraph::convolution{}, input, weights);
- throws_shape(migraph::convolution{}, input);
+ expect_shape(output, migraph::op::convolution{}, input, weights);
+ throws_shape(migraph::op::convolution{}, input);
migraph::shape input2{migraph::shape::float_type, {3, 3}};
migraph::shape weights2{migraph::shape::float_type, {3, 3}};
- throws_shape(migraph::convolution{}, input2, weights2);
- throws_shape(migraph::convolution{}, input2, weights);
+ throws_shape(migraph::op::convolution{}, input2, weights2);
+ throws_shape(migraph::op::convolution{}, input2, weights);
}
void transpose_shape()
{
migraph::shape input{migraph::shape::float_type, {2, 2}};
migraph::shape output{migraph::shape::float_type, {2, 2}, {1, 2}};
- expect_shape(input, migraph::transpose{{0, 1}}, input);
- expect_shape(output, migraph::transpose{{1, 0}}, input);
- throws_shape(migraph::transpose{{1, 2}}, input);
+ expect_shape(input, migraph::op::transpose{{0, 1}}, input);
+ expect_shape(output, migraph::op::transpose{{1, 0}}, input);
+ throws_shape(migraph::op::transpose{{1, 2}}, input);
}
void contiguous_shape()
{
migraph::shape output{migraph::shape::float_type, {2, 2}};
migraph::shape input{migraph::shape::float_type, {2, 2}, {1, 2}};
- expect_shape(output, migraph::contiguous{}, input);
- throws_shape(migraph::contiguous{}, input, input);
+ expect_shape(output, migraph::op::contiguous{}, input);
+ throws_shape(migraph::op::contiguous{}, input, input);
migraph::shape single{migraph::shape::float_type, {2}};
- throws_shape(migraph::contiguous{}, single);
+ throws_shape(migraph::op::contiguous{}, single);
}
void reshape_shape()
@@ -105,12 +105,12 @@
std::vector<std::size_t> lens(new_shape.size());
std::copy(new_shape.begin(), new_shape.end(), lens.begin());
migraph::shape output{migraph::shape::float_type, lens};
- expect_shape(output, migraph::reshape{new_shape}, input);
+ expect_shape(output, migraph::op::reshape{new_shape}, input);
}
for(auto&& new_shape : std::vector<std::vector<int64_t>>{{8, 3, 2, 2}, {1, 3, -1, -1}})
{
- throws_shape(migraph::reshape{new_shape}, input);
+ throws_shape(migraph::op::reshape{new_shape}, input);
}
}
@@ -118,16 +118,16 @@
{
migraph::shape input{migraph::shape::float_type, {2, 4, 6, 8}};
expect_shape(
- migraph::shape{migraph::shape::float_type, {1, 2 * 4 * 6 * 8}}, migraph::flatten{0}, input);
+ migraph::shape{migraph::shape::float_type, {1, 2 * 4 * 6 * 8}}, migraph::op::flatten{0}, input);
expect_shape(
- migraph::shape{migraph::shape::float_type, {2, 4 * 6 * 8}}, migraph::flatten{1}, input);
+ migraph::shape{migraph::shape::float_type, {2, 4 * 6 * 8}}, migraph::op::flatten{1}, input);
expect_shape(
- migraph::shape{migraph::shape::float_type, {2 * 4, 6 * 8}}, migraph::flatten{2}, input);
+ migraph::shape{migraph::shape::float_type, {2 * 4, 6 * 8}}, migraph::op::flatten{2}, input);
expect_shape(
- migraph::shape{migraph::shape::float_type, {2 * 4 * 6, 8}}, migraph::flatten{3}, input);
+ migraph::shape{migraph::shape::float_type, {2 * 4 * 6, 8}}, migraph::op::flatten{3}, input);
expect_shape(
- migraph::shape{migraph::shape::float_type, {2 * 4 * 6 * 8, 1}}, migraph::flatten{4}, input);
- throws_shape(migraph::flatten{5}, input);
+ migraph::shape{migraph::shape::float_type, {2 * 4 * 6 * 8, 1}}, migraph::op::flatten{4}, input);
+ throws_shape(migraph::op::flatten{5}, input);
}
int main()
@@ -18,9 +18,9 @@ void double_contig()
{
migraph::program p;
auto l = p.add_literal(get_2x2());
- auto t1 = p.add_instruction(migraph::transpose{{1, 0}}, l);
- auto c1 = p.add_instruction(migraph::contiguous{}, t1);
- auto c2 = p.add_instruction(migraph::contiguous{}, c1);
+ auto t1 = p.add_instruction(migraph::op::transpose{{1, 0}}, l);
+ auto c1 = p.add_instruction(migraph::op::contiguous{}, t1);
+ auto c2 = p.add_instruction(migraph::op::contiguous{}, c1);
p.add_instruction(pass_op{}, c2);
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().transposed());
@@ -36,8 +36,8 @@ void double_transpose()
{
migraph::program p;
auto l = p.add_literal(get_2x2());
- auto t1 = p.add_instruction(migraph::transpose{{1, 0}}, l);
- auto t2 = p.add_instruction(migraph::transpose{{1, 0}}, t1);
+ auto t1 = p.add_instruction(migraph::op::transpose{{1, 0}}, l);
+ auto t2 = p.add_instruction(migraph::op::transpose{{1, 0}}, t1);
p.add_instruction(pass_op{}, t2);
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().transposed());
@@ -53,10 +53,10 @@ void double_transpose_contig()
{
migraph::program p;
auto l = p.add_literal(get_2x2());
- auto t1 = p.add_instruction(migraph::transpose{{1, 0}}, l);
- auto c1 = p.add_instruction(migraph::contiguous{}, t1);
- auto t2 = p.add_instruction(migraph::transpose{{1, 0}}, c1);
- auto c2 = p.add_instruction(migraph::contiguous{}, t2);
+ auto t1 = p.add_instruction(migraph::op::transpose{{1, 0}}, l);
+ auto c1 = p.add_instruction(migraph::op::contiguous{}, t1);
+ auto t2 = p.add_instruction(migraph::op::transpose{{1, 0}}, c1);
+ auto c2 = p.add_instruction(migraph::op::contiguous{}, t2);
p.add_instruction(pass_op{}, c2);
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().transposed());
@@ -72,7 +72,7 @@ void single_transpose()
{
migraph::program p;
auto l = p.add_literal(get_2x2());
- auto t1 = p.add_instruction(migraph::transpose{{1, 0}}, l);
+ auto t1 = p.add_instruction(migraph::op::transpose{{1, 0}}, l);
p.add_instruction(pass_op{}, t1);
EXPECT(not p.get_shape().standard());
EXPECT(p.get_shape().transposed());
@@ -88,8 +88,8 @@ void double_transpose_sin_pass()
{
migraph::program p;
auto l = p.add_literal(get_2x2());
- auto t1 = p.add_instruction(migraph::transpose{{1, 0}}, l);
- p.add_instruction(migraph::transpose{{1, 0}}, t1);
+ auto t1 = p.add_instruction(migraph::op::transpose{{1, 0}}, l);
+ p.add_instruction(migraph::op::transpose{{1, 0}}, t1);
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().transposed());
p.compile(simplify_reshapes_target{});
@@ -106,7 +106,7 @@ void single_transpose_sin_pass()
{
migraph::program p;
auto l = p.add_literal(get_2x2());
- p.add_instruction(migraph::transpose{{1, 0}}, l);
+ p.add_instruction(migraph::op::transpose{{1, 0}}, l);
EXPECT(not p.get_shape().standard());
EXPECT(p.get_shape().transposed());
p.compile(simplify_reshapes_target{});