gaoqiong / MIGraphX

Commit 9a0b9505, authored Oct 23, 2018 by wsttiger
Merge from master
Parents: f5c10331 945e89e0
Showing 12 changed files with 208 additions and 1 deletion (+208 -1)
Files changed:

src/include/migraph/operators.hpp                   +16 -0
src/onnx/onnx.cpp                                   +14 -0
src/targets/cpu/cpu_lowering.cpp                    +12 -1
src/targets/gpu/CMakeLists.txt                      +1  -0
src/targets/gpu/include/migraph/gpu/leaky_relu.hpp  +36 -0 (new)
src/targets/gpu/include/migraph/gpu/miopen.hpp      +7  -0
src/targets/gpu/leaky_relu.cpp                      +37 -0 (new)
src/targets/gpu/lowering.cpp                        +15 -0
test/cpu_ops_test.cpp                               +31 -0
test/gpu/miopen.cpp                                 +12 -0
test/onnx/leaky_relu.onnx                           +14 -0 (new, binary)
test/onnx/onnx_test.cpp                             +13 -0
src/include/migraph/operators.hpp

@@ -239,6 +239,22 @@ struct activation
    }
};

struct leaky_relu
{
    std::string name() const { return "leaky_relu"; }
    float alpha;
    shape compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs, *this}.has(1);
        return inputs.front();
    }
    friend std::ostream& operator<<(std::ostream& os, const leaky_relu& op)
    {
        os << op.name() << ":" << op.alpha;
        return os;
    }
};

struct transpose
{
    std::vector<int64_t> dims;
...
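For reference, the new operator computes the standard leaky ReLU: f(x) = x for x > 0 and f(x) = alpha * x otherwise. A minimal standalone sketch of the same math, independent of MIGraphX (the function name is illustrative only):

#include <iostream>
#include <vector>

// Reference leaky ReLU; same math as the CPU lowering in this commit.
float leaky_relu_ref(float x, float alpha = 0.01f)
{
    return x > 0 ? x : x * alpha;
}

int main()
{
    for(float x : std::vector<float>{-1.f, 0.f, 1.f})
        std::cout << leaky_relu_ref(x) << ' '; // prints: -0.01 0 1
    std::cout << '\n';
}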
src/onnx/onnx.cpp

@@ -56,6 +56,7 @@ struct onnx_parser
        add_generic_op("Sub", op::sub{});
        add_generic_op("Sum", op::add{});
        add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
        add_mem_op("Constant", &onnx_parser::parse_constant);
        add_mem_op("Conv", &onnx_parser::parse_conv);
        add_mem_op("MaxPool", &onnx_parser::parse_pooling);
...
@@ -260,6 +261,19 @@ struct onnx_parser
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref parse_leaky_relu(const std::string&,
                                     attribute_map attributes,
                                     std::vector<instruction_ref> args)
    {
        float alpha = 0.01;
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::leaky_relu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    void parse_from(std::istream& is)
    {
        onnx::ModelProto model;
...
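Note that "LeakyRelu" is registered as a member-function handler, and the parser falls back to 0.01 when the node carries no alpha attribute, matching the ONNX operator's documented default. A toy sketch of the same name-to-handler dispatch pattern (types and names here are illustrative, not MIGraphX's):

#include <functional>
#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-in for the parser's op-name -> handler dispatch table.
struct mini_parser
{
    std::map<std::string, std::function<void(double)>> ops;

    void add_op(const std::string& name, std::function<void(double)> f)
    {
        ops[name] = std::move(f);
    }

    void parse_node(const std::string& name, double alpha)
    {
        ops.at(name)(alpha); // unknown op names throw std::out_of_range
    }
};

int main()
{
    mini_parser p;
    p.add_op("LeakyRelu",
             [](double alpha) { std::cout << "leaky_relu with alpha=" << alpha << '\n'; });
    p.parse_node("LeakyRelu", 0.01); // ONNX default when "alpha" is absent
}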
src/targets/cpu/cpu_lowering.cpp

@@ -454,6 +454,17 @@ struct relu_op
    }
};

struct leaky_relu_op
{
    op::leaky_relu op;
    std::string name() const { return "cpu::leaky_relu"; }
    auto fcn() const
    {
        auto& a = op.alpha;
        return [a](auto x) { return x > 0 ? x : x * a; };
    }
};

template <typename Op>
struct cpu_unary
{
...
@@ -599,7 +610,7 @@ struct cpu_apply
        extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
        apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
        apply_map["concat"]     = extend_op<cpu_concat, op::concat>();
        apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
        apply_map["identity"]   = simple_op<cpu_unary<identity_op>>();
        apply_map["tanh"]       = simple_op<cpu_unary<tanh_op>>();
        apply_map["sigmoid"]    = simple_op<cpu_unary<sigmoid_op>>();
...
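The cpu_unary wrapper (whose body is collapsed above) presumably applies the wrapped op's fcn() lambda elementwise over the input buffer. A standalone sketch of that pattern, with a stand-in op type (fake_leaky_relu_op is hypothetical):

#include <algorithm>
#include <iostream>
#include <vector>

// Sketch of the cpu_unary pattern: map the op's scalar lambda over a buffer.
template <typename Op>
std::vector<float> apply_unary(const Op& op, std::vector<float> data)
{
    auto f = op.fcn();
    std::transform(data.begin(), data.end(), data.begin(), f);
    return data;
}

struct fake_leaky_relu_op
{
    float alpha = 0.01f;
    auto fcn() const
    {
        auto a = alpha;
        return [a](float x) { return x > 0 ? x : x * a; };
    }
};

int main()
{
    for(float v : apply_unary(fake_leaky_relu_op{}, {-1.f, 0.f, 1.f}))
        std::cout << v << ' '; // -0.01 0 1
    std::cout << '\n';
}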
src/targets/gpu/CMakeLists.txt

@@ -34,6 +34,7 @@ add_library(migraph_gpu
    contiguous.cpp
    concat.cpp
    relu.cpp
    leaky_relu.cpp
    add.cpp
    batchnorm.cpp
    write_literals.cpp
...
src/targets/gpu/include/migraph/gpu/leaky_relu.hpp (new file, mode 100644)

#ifndef MIGRAPH_GUARD_RTGLIB_LEAKY_RELU_HPP
#define MIGRAPH_GUARD_RTGLIB_LEAKY_RELU_HPP

#include <migraph/gpu/lowering.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/instruction.hpp>
#include <migraph/operators.hpp>
#include <migraph/generate.hpp>
#include <migraph/shape_for_each.hpp>
#include <migraph/gpu/miopen.hpp>
#include <migraph/gpu/hip.hpp>
#include <migraph/dfor.hpp>
#include <migraph/gpu/device/contiguous.hpp>
#include <migraph/gpu/device/add.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/gpu/rocblas.hpp>
#include <migraph/gpu/context.hpp>
#include <utility>

namespace migraph {
namespace gpu {

struct miopen_leaky_relu
{
    shared<activation_descriptor> ad;

    std::string name() const { return "gpu::leaky_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument compute(context& ctx,
                     const shape& output_shape,
                     const std::vector<argument>& args) const;
};

} // namespace gpu
} // namespace migraph

#endif
src/targets/gpu/include/migraph/gpu/miopen.hpp

@@ -87,6 +87,13 @@ inline activation_descriptor make_relu()
    return ad;
}

inline activation_descriptor make_leaky_relu(double alpha)
{
    auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
    miopenSetActivationDescriptor(ad.get(), miopenActivationLEAKYRELU, alpha, 0, 0);
    return ad;
}

inline fusion_plan_descriptor make_fusion_plan(const shape& input)
{
    auto t = make_tensor(input);
...
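MIOpen's miopenActivationLEAKYRELU mode yields alpha * x for non-positive inputs; of the descriptor's three trailing parameters (activAlpha, activBeta, activGamma) only alpha is meaningful for this mode, hence the two zeros. The helper above discards the miopenStatus_t return codes; a hedged sketch of the same setup with explicit checking (raw descriptor returned for brevity, where the real code wraps it in a managed handle):

#include <miopen/miopen.h>
#include <stdexcept>

inline miopenActivationDescriptor_t make_checked_leaky_relu(double alpha)
{
    miopenActivationDescriptor_t ad;
    if(miopenCreateActivationDescriptor(&ad) != miopenStatusSuccess)
        throw std::runtime_error("failed to create activation descriptor");
    // LEAKYRELU mode: y = x for x > 0, y = alpha * x otherwise.
    // The last two parameters (beta, gamma) are unused by this mode.
    if(miopenSetActivationDescriptor(ad, miopenActivationLEAKYRELU, alpha, 0, 0) !=
       miopenStatusSuccess)
        throw std::runtime_error("failed to set activation descriptor");
    return ad;
}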
src/targets/gpu/leaky_relu.cpp (new file, mode 100644)

#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/operators.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/gpu/miopen.hpp>
#include <utility>

namespace migraph {
namespace gpu {

shape miopen_leaky_relu::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).not_broadcasted();
    return inputs.at(1);
}

argument miopen_leaky_relu::compute(context& ctx,
                                    const shape& output_shape,
                                    const std::vector<argument>& args) const
{
    float alpha = 1, beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.handle.get(),
                            ad.get(),
                            &alpha,
                            x_desc.get(),
                            args[0].implicit(),
                            &beta,
                            y_desc.get(),
                            args[1].implicit());
    return args[1];
}

} // namespace gpu
} // namespace migraph
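Note the two-input shape check: it reflects the GPU target's destination-passing convention, where the lowering pass (next file) appends a preallocated output buffer as the final argument, the kernel writes into it, and compute returns it (args[1]). The alpha = 1, beta = 0 pair are MIOpen's blending scalars, i.e. y = 1 * act(x) + 0 * y. A standalone sketch of the convention (all names illustrative):

#include <cstddef>
#include <iostream>
#include <vector>

// Destination-passing sketch: caller allocates `out`; callee fills and returns it.
std::vector<float>& leaky_relu_into(const std::vector<float>& x,
                                    std::vector<float>& out,
                                    float alpha = 0.01f)
{
    out.resize(x.size());
    for(std::size_t i = 0; i < x.size(); ++i)
        out[i] = x[i] > 0 ? x[i] : x[i] * alpha;
    return out; // analogous to `return args[1];` above
}

int main()
{
    std::vector<float> in{-1.f, 0.f, 1.f}, out;
    for(float v : leaky_relu_into(in, out))
        std::cout << v << ' '; // -0.01 0 1
    std::cout << '\n';
}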
src/targets/gpu/lowering.cpp

@@ -16,6 +16,7 @@
#include <migraph/gpu/convolution.hpp>
#include <migraph/gpu/contiguous.hpp>
#include <migraph/gpu/relu.hpp>
#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/gpu/softmax.hpp>
#include <migraph/gpu/add.hpp>
#include <migraph/gpu/batchnorm.hpp>
...
@@ -52,6 +53,10 @@ struct miopen_apply
        {
            check_shape(s, apply_activation(it));
        }
        else if(it->name() == "leaky_relu")
        {
            check_shape(s, apply_leaky_relu(it));
        }
        else if(it->name() == "pooling")
        {
            check_shape(s, apply_pooling(it));
...
@@ -134,6 +139,16 @@ struct miopen_apply
        return ins;
    }

    instruction_ref apply_leaky_relu(instruction_ref ins)
    {
        auto&& op   = any_cast<op::leaky_relu>(ins->get_operator());
        auto ad     = make_leaky_relu(op.alpha);
        auto output = insert_allocation(ins, ins->get_shape());
        return prog->replace_instruction(
            ins, miopen_leaky_relu{std::move(ad)}, ins->inputs().at(0), output);
    }

    instruction_ref apply_softmax(instruction_ref ins)
    {
        auto&& op = any_cast<op::softmax>(ins->get_operator());
...
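apply_leaky_relu follows the pass's standard rewrite recipe: read the frontend op's attributes, build the MIOpen descriptor, insert an allocation for the output, and splice the gpu:: instruction in place of the original. A toy analogue of that in-place splice, with std::list standing in for the instruction stream (all names illustrative):

#include <algorithm>
#include <iostream>
#include <list>
#include <string>

int main()
{
    // Toy instruction stream; the real pass works on instruction_refs.
    std::list<std::string> program{"@param:x", "leaky_relu", "@return"};
    auto it = std::find(program.begin(), program.end(), "leaky_relu");
    // Replace the frontend op with an output allocation plus the GPU op.
    it = program.erase(it);
    program.insert(it, {"hip::allocate", "gpu::leaky_relu"});
    for(const auto& ins : program)
        std::cout << ins << '\n';
}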
test/cpu_ops_test.cpp

@@ -511,6 +511,34 @@ void div_test()
    EXPECT(migraph::verify_range(results_vector, gold));
}

void relu_test()
{
    migraph::program p;
    migraph::shape s{migraph::shape::float_type, {3}};
    auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
    p.add_instruction(migraph::op::activation{"relu"}, l);
    p.compile(migraph::cpu::cpu_target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0.f, 0.f, 1.f};
    EXPECT(migraph::verify_range(results_vector, gold));
}

void leaky_relu_test()
{
    migraph::program p;
    migraph::shape s{migraph::shape::float_type, {3}};
    auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
    p.add_instruction(migraph::op::leaky_relu{0.01}, l);
    p.compile(migraph::cpu::cpu_target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {-0.01f, 0.f, 1.f};
    EXPECT(migraph::verify_range(results_vector, gold));
}

void reshape_test()
{
    migraph::shape a_shape{migraph::shape::float_type, {24, 1, 1, 1}};
...
@@ -968,6 +996,9 @@ int main()
    add_broadcast_test();
    sub_test();
    mul_test();
    div_test();
    relu_test();
    leaky_relu_test();
    gemm_test<float>();
    gemm_test<double>();
    reshape_test();
...
test/gpu/miopen.cpp

@@ -368,6 +368,17 @@ struct test_add_relu
    }
};

struct test_leaky_relu
{
    migraph::program create_program() const
    {
        migraph::program p;
        auto x = p.add_parameter(
            "x", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
        p.add_instruction(migraph::op::leaky_relu{0.01}, x);
        return p;
    }
};

struct test_conv_pooling
{
    migraph::program create_program() const
...
@@ -655,6 +666,7 @@ int main()
    verify_program<test_conv2>();
    verify_program<test_conv_relu>();
    verify_program<test_add_relu>();
    verify_program<test_leaky_relu>();
    verify_program<test_conv_pooling>();
    verify_program<test_gemm>();
    // verify_program<test_gemm_ld>();
...
test/onnx/leaky_relu.onnx (new file, mode 100644)

(Binary ONNX protobuf, not rendered: a graph with a single LeakyRelu node carrying an alpha attribute, input "0", output "1"; producer name "leaky_relu-example", graph name "test-model". No newline at end of file.)
test/onnx/onnx_test.cpp

@@ -88,10 +88,23 @@ void pytorch_conv_relu_maxpool_x2()
    EXPECT(p == prog);
}

void leaky_relu_test()
{
    migraph::program p;
    float alpha = 0.01f;
    auto l0 = p.add_parameter("0", {migraph::shape::float_type, {3}});
    p.add_instruction(migraph::op::leaky_relu{alpha}, l0);
    auto prog = migraph::parse_onnx("leaky_relu.onnx");
    EXPECT(p == prog);
}

int main()
{
    pytorch_conv_bias_test();
    pytorch_conv_relu_maxpool();
    pytorch_conv_bn_relu_maxpool();
    pytorch_conv_relu_maxpool_x2();
    leaky_relu_test();
}