gaoqiong / MIGraphX · Commits

Commit a813d1d6, authored Oct 19, 2018 by Khalique

    added leaky_relu

parent 11d00d61

Showing 7 changed files with 151 additions and 0 deletions (+151 -0)
src/include/migraph/operators.hpp                   +16 -0
src/targets/cpu/cpu_lowering.cpp                    +13 -0
src/targets/gpu/include/migraph/gpu/leaky_relu.hpp  +36 -0
src/targets/gpu/include/migraph/gpu/miopen.hpp       +7 -0
src/targets/gpu/leaky_relu.cpp                      +37 -0
src/targets/gpu/lowering.cpp                        +11 -0
test/cpu_ops_test.cpp                               +31 -0
src/include/migraph/operators.hpp

@@ -224,6 +224,22 @@ struct activation
    }
};

struct leaky_relu
{
    std::string name() const { return "leaky_relu"; }
    float alpha;

    shape compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs, *this}.has(1);
        return inputs.front();
    }

    friend std::ostream& operator<<(std::ostream& os, const leaky_relu& op)
    {
        os << op.name() << ":" << op.alpha;
        return os;
    }
};

struct transpose
{
    std::vector<int64_t> dims;
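For reference, leaky ReLU is the identity for positive inputs and scales non-positive inputs by alpha. A minimal standalone sketch (plain C++, independent of MIGraphX) using the same values as the new CPU test later in this commit:

#include <iostream>

// Leaky ReLU: identity for x > 0, slope alpha otherwise.
float leaky_relu(float x, float alpha) { return x > 0 ? x : x * alpha; }

int main()
{
    // Same inputs and alpha as leaky_relu_test: {-1, 0, 1}, alpha = 0.01
    for(float x : {-1.f, 0.f, 1.f})
        std::cout << leaky_relu(x, 0.01f) << ' '; // prints: -0.01 0 1
}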
src/targets/cpu/cpu_lowering.cpp

@@ -413,6 +413,18 @@ struct relu_op
    }
};

struct leaky_relu_op
{
    op::leaky_relu op;
    std::string name() const { return "cpu::leaky_relu"; }
    auto fcn() const
    {
        auto& a = op.alpha;
        return [a](auto x) { return x > 0 ? x : x * a; };
    }
};

template <typename Op>
struct cpu_unary
{

@@ -557,6 +569,7 @@ struct cpu_apply
        apply_map["batch_norm_inference"] =
            extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
        apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
        apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
        apply_map["identity"]   = simple_op<cpu_unary<identity_op>>();
        apply_map["tanh"]       = simple_op<cpu_unary<tanh_op>>();
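cpu_unary itself is not part of this diff, but the pattern it implements is to apply the wrapped op's fcn() lambda to every element of the input. A rough sketch of that idea (hypothetical apply_unary helper and demo op, not the actual MIGraphX template):

#include <algorithm>
#include <vector>

// Demo op exposing the same fcn() interface as leaky_relu_op above.
struct demo_leaky_relu_op
{
    float alpha = 0.01f;
    auto fcn() const
    {
        auto a = alpha;
        return [a](auto x) { return x > 0 ? x : x * a; };
    }
};

// Hypothetical stand-in for cpu_unary: run fcn() over each element.
template <typename Op>
std::vector<float> apply_unary(const Op& op, std::vector<float> data)
{
    std::transform(data.begin(), data.end(), data.begin(), op.fcn());
    return data;
}

// apply_unary(demo_leaky_relu_op{}, {-1.f, 0.f, 1.f}) == {-0.01f, 0.f, 1.f}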
src/targets/gpu/include/migraph/gpu/leaky_relu.hpp (new file, mode 100644)

#ifndef MIGRAPH_GUARD_RTGLIB_LEAKY_RELU_HPP
#define MIGRAPH_GUARD_RTGLIB_LEAKY_RELU_HPP

#include <migraph/gpu/lowering.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/instruction.hpp>
#include <migraph/operators.hpp>
#include <migraph/generate.hpp>
#include <migraph/shape_for_each.hpp>
#include <migraph/gpu/miopen.hpp>
#include <migraph/gpu/hip.hpp>
#include <migraph/dfor.hpp>
#include <migraph/gpu/device/contiguous.hpp>
#include <migraph/gpu/device/add.hpp>
#include <migraph/iterator_for.hpp>
#include <migraph/gpu/rocblas.hpp>
#include <migraph/gpu/context.hpp>
#include <utility>

namespace migraph {
namespace gpu {

struct miopen_leaky_relu
{
    // Shared handle to the MIOpen activation descriptor created at
    // lowering time (see make_leaky_relu in miopen.hpp).
    shared<activation_descriptor> ad;

    std::string name() const { return "gpu::leaky_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
    compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
};

} // namespace gpu
} // namespace migraph

#endif
src/targets/gpu/include/migraph/gpu/miopen.hpp

@@ -87,6 +87,13 @@ inline activation_descriptor make_relu()
    return ad;
}

inline activation_descriptor make_leaky_relu(double alpha)
{
    auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
    // Mode miopenActivationLEAKYRELU: y = x for x > 0, else alpha * x.
    // The trailing activBeta/activGamma parameters are unused by this mode.
    miopenSetActivationDescriptor(ad.get(), miopenActivationLEAKYRELU, alpha, 0, 0);
    return ad;
}

inline fusion_plan_descriptor make_fusion_plan(const shape& input)
{
    auto t = make_tensor(input);
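Usage, as it appears in the lowering pass later in this commit (the 0.01 value is illustrative, matching the new CPU test):

auto ad = make_leaky_relu(0.01);     // managed MIOpen activation descriptor
miopen_leaky_relu op{std::move(ad)}; // the GPU op declared in leaky_relu.hpp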
src/targets/gpu/leaky_relu.cpp (new file, mode 100644)

#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/operators.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/gpu/miopen.hpp>
#include <utility>

namespace migraph {
namespace gpu {

shape miopen_leaky_relu::compute_shape(const std::vector<shape>& inputs) const
{
    // Two inputs: the data tensor and the preallocated output buffer
    // inserted by the lowering pass; the result shape is the output's.
    check_shapes{inputs, *this}.has(2).not_broadcasted();
    return inputs.at(1);
}

argument miopen_leaky_relu::compute(context& ctx,
                                    const shape& output_shape,
                                    const std::vector<argument>& args) const
{
    // MIOpen blends y = alpha * activation(x) + beta * y, so alpha = 1,
    // beta = 0 writes the activation result straight into args[1].
    float alpha = 1, beta = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.handle.get(),
                            ad.get(),
                            &alpha,
                            x_desc.get(),
                            args[0].implicit(),
                            &beta,
                            y_desc.get(),
                            args[1].implicit());
    return args[1];
}

} // namespace gpu
} // namespace migraph
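For readers unfamiliar with MIOpen, here is a self-contained sketch of the raw API sequence that make_leaky_relu and miopen_leaky_relu::compute wrap. This is illustrative host code, not part of the commit; error checking is omitted:

#include <miopen/miopen.h>
#include <hip/hip_runtime.h>
#include <vector>

int main()
{
    miopenHandle_t handle;
    miopenCreate(&handle);

    // Describe a 3-element float tensor as NCHW {1, 3, 1, 1}.
    miopenTensorDescriptor_t desc;
    miopenCreateTensorDescriptor(&desc);
    miopenSet4dTensorDescriptor(desc, miopenFloat, 1, 3, 1, 1);

    // Leaky ReLU descriptor with alpha = 0.01 (as in the CPU test).
    miopenActivationDescriptor_t ad;
    miopenCreateActivationDescriptor(&ad);
    miopenSetActivationDescriptor(ad, miopenActivationLEAKYRELU, 0.01, 0, 0);

    std::vector<float> host = {-1.f, 0.f, 1.f};
    float *x, *y;
    hipMalloc(&x, 3 * sizeof(float));
    hipMalloc(&y, 3 * sizeof(float));
    hipMemcpy(x, host.data(), 3 * sizeof(float), hipMemcpyHostToDevice);

    // y = alpha * leaky_relu(x) + beta * y; alpha = 1, beta = 0 overwrites y.
    float alpha = 1.f, beta = 0.f;
    miopenActivationForward(handle, ad, &alpha, desc, x, &beta, desc, y);

    hipMemcpy(host.data(), y, 3 * sizeof(float), hipMemcpyDeviceToHost);
    // host now holds {-0.01f, 0.f, 1.f}

    hipFree(x);
    hipFree(y);
    miopenDestroyActivationDescriptor(ad);
    miopenDestroyTensorDescriptor(desc);
    miopenDestroy(handle);
}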
src/targets/gpu/lowering.cpp

@@ -16,6 +16,7 @@
#include <migraph/gpu/convolution.hpp>
#include <migraph/gpu/contiguous.hpp>
#include <migraph/gpu/relu.hpp>
#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/gpu/softmax.hpp>
#include <migraph/gpu/add.hpp>
#include <migraph/gpu/batchnorm.hpp>

@@ -129,6 +130,16 @@ struct miopen_apply
        return ins;
    }

    instruction_ref apply_leaky_relu(instruction_ref ins)
    {
        auto&& op = any_cast<op::leaky_relu>(ins->get_operator());
        auto ad   = make_leaky_relu(op.alpha);
        // Allocate the output buffer and rewrite the generic op into the
        // MIOpen-backed one, with the allocation passed as a second input.
        auto output = insert_allocation(ins, ins->get_shape());
        return prog->replace_instruction(
            ins, miopen_leaky_relu{std::move(ad)}, ins->inputs().at(0), output);
    }

    instruction_ref apply_softmax(instruction_ref ins)
    {
        auto&& op = any_cast<op::softmax>(ins->get_operator());
test/cpu_ops_test.cpp

@@ -461,6 +461,34 @@ void div_test()
    EXPECT(migraph::verify_range(results_vector, gold));
}

void relu_test()
{
    migraph::program p;
    migraph::shape s{migraph::shape::float_type, {3}};
    auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
    p.add_instruction(migraph::op::activation{"relu"}, l);
    p.compile(migraph::cpu::cpu_target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0.f, 0.f, 1.f};
    EXPECT(migraph::verify_range(results_vector, gold));
}

void leaky_relu_test()
{
    migraph::program p;
    migraph::shape s{migraph::shape::float_type, {3}};
    auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
    p.add_instruction(migraph::op::leaky_relu{0.01}, l);
    p.compile(migraph::cpu::cpu_target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {-0.01f, 0.f, 1.f};
    EXPECT(migraph::verify_range(results_vector, gold));
}

void reshape_test()
{
    migraph::shape a_shape{migraph::shape::float_type, {24, 1, 1, 1}};

@@ -917,6 +945,9 @@ int main()
    add_broadcast_test();
    sub_test();
    mul_test();
    div_test();
    relu_test();
    leaky_relu_test();
    gemm_test<float>();
    gemm_test<double>();
    reshape_test();