Unverified Commit 0cd5dea2, authored by Paul Fultz II, committed by GitHub

Merge pull request #104 from ROCmSoftwarePlatform/rm_activation

remove activation op
parents 4debaf07 ef1f8422
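
This merge removes the string-moded `activation` operator in favor of a dedicated `relu` operator derived from `unary`. For client code the migration is a one-line substitution; the sketch below is adapted directly from the updated `relu_test` in this diff (CPU target, three-element literal):

```cpp
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
// was: p.add_instruction(migraph::op::activation{"relu"}, l);
p.add_instruction(migraph::op::relu{}, l);
p.compile(migraph::cpu::target{});
auto result = p.eval({}); // relu clamps negatives, so this yields {0, 0, 1}
```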
@@ -223,22 +223,6 @@ struct pooling
     }
 };
-struct activation
-{
-    std::string mode;
-    std::string name() const { return "activation"; }
-    shape compute_shape(std::vector<shape> inputs) const
-    {
-        check_shapes{inputs, *this}.has(1);
-        return inputs.front();
-    }
-    friend std::ostream& operator<<(std::ostream& os, const activation& op)
-    {
-        os << op.name() << ":" << op.mode;
-        return os;
-    }
-};
 struct leaky_relu
 {
     std::string name() const { return "leaky_relu"; }
@@ -673,6 +657,11 @@ struct neg : unary
     std::string name() const { return "neg"; }
 };
+struct relu : unary
+{
+    std::string name() const { return "relu"; }
+};
 struct softmax
 {
     std::string name() const { return "softmax"; }
...
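The new `relu` follows the same pattern as `neg` just above it: derive from `unary` and supply only a name, leaving the one-input, shape-preserving rule (which the removed `activation::compute_shape` spelled out inline) to the shared base. Adding a further elementwise operator after this refactor would follow the same shape; the example below is purely illustrative, not an op added by this PR:

```cpp
// Hypothetical op following the relu/neg pattern from this diff;
// "sigmoid" here is an illustration only.
struct sigmoid : unary
{
    std::string name() const { return "sigmoid"; }
};
```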
@@ -52,7 +52,7 @@ struct onnx_parser
         add_generic_op("Div", op::div{});
         add_generic_op("MatMul", op::dot{});
         add_generic_op("Mul", op::mul{});
-        add_generic_op("Relu", op::activation{"relu"});
+        add_generic_op("Relu", op::relu{});
         add_generic_op("Sub", op::sub{});
         add_generic_op("Sum", op::add{});
...
@@ -606,6 +606,7 @@ struct cpu_apply
         apply_map["sin"] = simple_op<cpu_unary<sin_op>>();
         apply_map["cos"] = simple_op<cpu_unary<cos_op>>();
         apply_map["tan"] = simple_op<cpu_unary<tan_op>>();
+        apply_map["relu"] = simple_op<cpu_unary<relu_op>>();
         apply_map["add"] = simple_op<cpu_binary<add_op>>();
         apply_map["sub"] = simple_op<cpu_binary<sub_op>>();
         apply_map["mul"] = simple_op<cpu_binary<mul_op>>();
@@ -619,11 +620,7 @@ struct cpu_apply
         init();
         for(auto it : iterator_for(*prog))
         {
-            if(it->name() == "activation")
-            {
-                apply_activation(it);
-            }
-            else if(it->name() == "pooling")
+            if(it->name() == "pooling")
             {
                 apply_pooling(it);
             }
@@ -647,13 +644,6 @@ struct cpu_apply
         prog->replace_instruction(ins, T{op}, ins->inputs());
     }
-    void apply_activation(instruction_ref ins)
-    {
-        auto&& op = any_cast<op::activation>(ins->get_operator());
-        if(op.mode == "relu")
-            prog->replace_instruction(ins, cpu_unary<relu_op>{}, ins->inputs());
-    }
     void apply_pooling(instruction_ref ins)
     {
         auto&& op = any_cast<op::pooling>(ins->get_operator());
...
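On the CPU target, `relu` now goes through the generic elementwise dispatch: it is registered in `apply_map` up front, and the special-cased `apply_activation` pass is deleted. `relu_op` itself is defined elsewhere in the CPU target and is not part of this diff; a plausible self-contained sketch of such a functor, following the pattern the `sin_op`/`cos_op` registrations suggest (this definition is an assumption, not code from this PR):

```cpp
#include <algorithm>

// Hypothetical elementwise functor in the shape cpu_unary<T> presumably
// expects: a callable applied to each element independently.
struct relu_op
{
    template <class T>
    T operator()(T x) const
    {
        return std::max(x, T{0}); // relu(x) = max(x, 0)
    }
};
```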
@@ -50,9 +50,9 @@ struct miopen_apply
             {
                 check_shape(s, apply_convolution(it));
             }
-            else if(it->name() == "activation")
+            else if(it->name() == "relu")
             {
-                check_shape(s, apply_activation(it));
+                check_shape(s, apply_relu(it));
             }
             else if(it->name() == "leaky_relu")
             {
@@ -131,17 +131,13 @@ struct miopen_apply
             ins, miopen_pooling{op, std::move(pd)}, ins->inputs().at(0), output);
     }
-    instruction_ref apply_activation(instruction_ref ins)
+    instruction_ref apply_relu(instruction_ref ins)
     {
-        auto&& op = any_cast<op::activation>(ins->get_operator());
-        auto ad = make_relu();
-        if(op.mode == "relu")
-        {
-            auto output = insert_allocation(ins, ins->get_shape());
-            return prog->replace_instruction(
-                ins, miopen_relu{std::move(ad)}, ins->inputs().at(0), output);
-        }
-        return ins;
+        auto ad     = make_relu();
+        auto output = insert_allocation(ins, ins->get_shape());
+        return prog->replace_instruction(
+            ins, miopen_relu{std::move(ad)}, ins->inputs().at(0), output);
     }
     instruction_ref apply_leaky_relu(instruction_ref ins)
...
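On the GPU target, `apply_relu` no longer inspects a mode string: it unconditionally builds an activation descriptor via `make_relu()` and rewrites the instruction to `miopen_relu`. `make_relu` is not shown in this diff; a sketch of what it presumably wraps, using the public MIOpen C API (the function name below is hypothetical, and error handling plus the repo's RAII wrapper are elided):

```cpp
#include <miopen/miopen.h>

// Sketch: create a MIOpen activation descriptor configured for plain RELU.
// The repo's make_relu() presumably returns an owning wrapper around this.
miopenActivationDescriptor_t make_relu_descriptor()
{
    miopenActivationDescriptor_t ad = nullptr;
    miopenCreateActivationDescriptor(&ad);
    // The alpha/beta/gamma parameters are unused by the plain RELU mode.
    miopenSetActivationDescriptor(ad, miopenActivationRELU, 0.0, 0.0, 0.0);
    return ad;
}
```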
@@ -556,7 +556,7 @@ void relu_test()
     migraph::program p;
     migraph::shape s{migraph::shape::float_type, {3}};
     auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
-    p.add_instruction(migraph::op::activation{"relu"}, l);
+    p.add_instruction(migraph::op::relu{}, l);
     p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
...
@@ -158,7 +158,7 @@ struct test_literals
         auto weights = p.add_literal(
             generate_literal(migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}}));
         auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
-        p.add_instruction(migraph::op::activation{"relu"}, conv);
+        p.add_instruction(migraph::op::relu{}, conv);
         return p;
     }
 };
@@ -392,7 +392,7 @@ struct test_conv_relu
         auto weights =
             p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
-        p.add_instruction(migraph::op::activation{"relu"}, conv);
+        p.add_instruction(migraph::op::relu{}, conv);
         return p;
     }
 };
@@ -406,7 +406,7 @@ struct test_conv_relu_half
         auto weights =
             p.add_parameter("w", migraph::shape{migraph::shape::half_type, {4, 3, 3, 3}});
         auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
-        p.add_instruction(migraph::op::activation{"relu"}, conv);
+        p.add_instruction(migraph::op::relu{}, conv);
         return p;
     }
 };
@@ -419,7 +419,7 @@ struct test_add_relu
         auto x = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto y = p.add_parameter("y", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto add = p.add_instruction(migraph::op::add{}, x, y);
-        p.add_instruction(migraph::op::activation{"relu"}, add);
+        p.add_instruction(migraph::op::relu{}, add);
         return p;
     }
 };
@@ -446,7 +446,7 @@ struct test_conv_pooling
             p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
         auto pooling = p.add_instruction(migraph::op::pooling{"max"}, conv);
-        p.add_instruction(migraph::op::activation{"relu"}, pooling);
+        p.add_instruction(migraph::op::relu{}, pooling);
         return p;
     }
 };
@@ -657,7 +657,7 @@ struct test_conv_bn_relu_pooling
         auto variance = p.add_literal(migraph::abs(migraph::generate_literal(vars, 4)));
         auto bn = p.add_instruction(
             migraph::op::batch_norm_inference{}, conv, scale, bias, mean, variance);
-        auto relu = p.add_instruction(migraph::op::activation{"relu"}, bn);
+        auto relu = p.add_instruction(migraph::op::relu{}, bn);
         p.add_instruction(migraph::op::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
         return p;
     }
@@ -727,7 +727,7 @@ struct test_conv_bn_relu_pooling2
         auto conv2 = p.add_instruction(migraph::op::convolution{{0, 0}, {2, 2}, {1, 1}}, x2, w2);
         auto bn2 = add_bn(p, conv2, 2048);
         auto add = p.add_instruction(migraph::op::add{}, bn1, bn2);
-        auto relu = p.add_instruction(migraph::op::activation{"relu"}, add);
+        auto relu = p.add_instruction(migraph::op::relu{}, add);
         p.add_instruction(migraph::op::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
         return p;
     }
...
@@ -32,7 +32,7 @@ void pytorch_conv_relu_maxpool()
     auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
     auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
     auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
-    auto l6 = p.add_instruction(migraph::op::activation{"relu"}, l5);
+    auto l6 = p.add_instruction(migraph::op::relu{}, l5);
     p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
     auto prog = migraph::parse_onnx("conv_relu_maxpool.onnx");
@@ -55,7 +55,7 @@ void pytorch_conv_bn_relu_maxpool()
     auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
     auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
     auto l6 = p.add_instruction(migraph::op::batch_norm_inference{1.0e-5f}, l5, p3, p4, p5, p6);
-    auto l7 = p.add_instruction(migraph::op::activation{"relu"}, l6);
+    auto l7 = p.add_instruction(migraph::op::relu{}, l6);
     p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l7);
     auto prog = migraph::parse_onnx("conv_bn_relu_maxpool.onnx");
@@ -72,7 +72,7 @@ void pytorch_conv_relu_maxpool_x2()
     auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
     auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
     auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
-    auto l6 = p.add_instruction(migraph::op::activation{"relu"}, l5);
+    auto l6 = p.add_instruction(migraph::op::relu{}, l5);
     auto l7 = p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
     auto l8 = p.add_parameter("3", {migraph::shape::float_type, {1, 5, 5, 5}});
@@ -80,7 +80,7 @@ void pytorch_conv_relu_maxpool_x2()
     auto l10 = p.add_instruction(migraph::op::convolution{}, l7, l8);
     auto l11 = p.add_instruction(migraph::op::broadcast{axis, l10->get_shape()}, l9);
     auto l12 = p.add_instruction(migraph::op::add{}, l10, l11);
-    auto l13 = p.add_instruction(migraph::op::activation{"relu"}, l12);
+    auto l13 = p.add_instruction(migraph::op::relu{}, l12);
     p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l13);
     auto prog = migraph::parse_onnx("conv_relu_maxpoolX2.onnx");
...