Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
5168b178
Commit
5168b178
authored
Nov 12, 2018
by
Khalique
Browse files
continue work on LRN
parent
c425d1a7
Changes
9
Hide whitespace changes
Inline
Side-by-side
Showing
9 changed files
with
110 additions
and
19 deletions
+110
-19
src/include/migraph/operators.hpp
src/include/migraph/operators.hpp
+3
-3
src/targets/cpu/lowering.cpp
src/targets/cpu/lowering.cpp
+10
-12
src/targets/gpu/CMakeLists.txt
src/targets/gpu/CMakeLists.txt
+1
-0
src/targets/gpu/LRN.cpp
src/targets/gpu/LRN.cpp
+40
-0
src/targets/gpu/include/migraph/gpu/LRN.hpp
src/targets/gpu/include/migraph/gpu/LRN.hpp
+3
-3
src/targets/gpu/include/migraph/gpu/miopen.hpp
src/targets/gpu/include/migraph/gpu/miopen.hpp
+13
-1
src/targets/gpu/lowering.cpp
src/targets/gpu/lowering.cpp
+13
-0
test/cpu_ops_test.cpp
test/cpu_ops_test.cpp
+15
-0
test/gpu/miopen.cpp
test/gpu/miopen.cpp
+12
-0
No files found.
src/include/migraph/operators.hpp
View file @
5168b178
...
@@ -51,13 +51,13 @@ struct batch_norm_inference
...
@@ -51,13 +51,13 @@ struct batch_norm_inference
}
}
};
};
struct
lrn
struct
LRN
{
{
float
alpha
=
0.0001
;
float
alpha
=
0.0001
;
float
beta
=
0.75
;
float
beta
=
0.75
;
float
bias
=
1.0
;
float
bias
=
1.0
;
int
size
;
int
size
;
std
::
string
name
()
const
{
return
"
lrn
"
;
}
std
::
string
name
()
const
{
return
"
LRN
"
;
}
shape
compute_shape
(
std
::
vector
<
shape
>
inputs
)
const
shape
compute_shape
(
std
::
vector
<
shape
>
inputs
)
const
{
{
...
@@ -65,7 +65,7 @@ struct lrn
...
@@ -65,7 +65,7 @@ struct lrn
return
inputs
.
front
();
return
inputs
.
front
();
}
}
friend
std
::
ostream
&
operator
<<
(
std
::
ostream
&
os
,
const
lrn
&
op
)
friend
std
::
ostream
&
operator
<<
(
std
::
ostream
&
os
,
const
LRN
&
op
)
{
{
os
<<
op
.
name
()
<<
":"
<<
op
.
alpha
<<
":"
<<
op
.
beta
<<
":"
<<
op
.
bias
<<
":"
<<
op
.
size
;
os
<<
op
.
name
()
<<
":"
<<
op
.
alpha
<<
":"
<<
op
.
beta
<<
":"
<<
op
.
bias
<<
":"
<<
op
.
size
;
return
os
;
return
os
;
...
...
src/targets/cpu/lowering.cpp
View file @
5168b178
...
@@ -94,11 +94,11 @@ struct cpu_batch_norm_inference
...
@@ -94,11 +94,11 @@ struct cpu_batch_norm_inference
}
}
};
};
struct
cpu_
lrn
struct
cpu_
LRN
{
{
op
::
lrn
op
;
op
::
LRN
op
;
std
::
string
name
()
const
{
return
"cpu::
lrn
"
;
}
std
::
string
name
()
const
{
return
"cpu::
LRN
"
;
}
shape
compute_shape
(
const
std
::
vector
<
shape
>&
inputs
)
const
{
return
op
.
compute_shape
(
inputs
);
}
shape
compute_shape
(
const
std
::
vector
<
shape
>&
inputs
)
const
{
return
op
.
compute_shape
(
inputs
);
}
argument
compute
(
context
&
,
shape
output_shape
,
std
::
vector
<
argument
>
args
)
const
argument
compute
(
context
&
,
shape
output_shape
,
std
::
vector
<
argument
>
args
)
const
{
{
...
@@ -108,23 +108,21 @@ struct cpu_lrn
...
@@ -108,23 +108,21 @@ struct cpu_lrn
// Interior of cpu_LRN::compute — cross-channel LRN over an NCHW tensor.
// For window `size`, each channel c is normalized over the squared sum of
// channels [c-radius, c+radius], clamped to the valid channel range:
//   out(b,c,h,w) = in(b,c,h,w) * (bias + alpha/size * sum)^(-beta)
int channels = output_shape.lens()[1];
int height   = output_shape.lens()[2];
int width    = output_shape.lens()[3];
float alphaoverarea = op.alpha / op.size;
int radius          = (op.size - 1) / 2;

dfor(n_batch, height, width)([&](int b, int h, int w) {
    dfor(channels)([&](int c) {
        // BUG FIX: scale must restart at zero for every channel. It was
        // previously declared in the enclosing (b,h,w) lambda, so each
        // channel accumulated on top of the previous channel's
        // already-normalized value.
        float scale = 0;
        int start = (c - radius) < 0 ? 0 : (c - radius);
        // BUG FIX: +1 makes the window inclusive of c+radius (the loop
        // bound is exclusive); the old `min(c+radius, channels)` silently
        // dropped the top channel of every window, disagreeing with the
        // LRN definition and with the gold values in LRN_test.
        int end = (c + radius + 1) > channels ? channels : (c + radius + 1);
        for(int k = start; k < end; ++k)
        {
            // v*v instead of std::pow(v, 2): same value, no libm call.
            const float v = input(b, k, h, w);
            scale += v * v;
        }
        scale *= alphaoverarea;
        scale += op.bias;
        scale = std::pow(scale, -op.beta);
        output(b, c, h, w) = input(b, c, h, w) * scale;
    });
});
...
@@ -635,7 +633,7 @@ struct cpu_apply
...
@@ -635,7 +633,7 @@ struct cpu_apply
apply_map
[
"dot"
]
=
extend_op
<
cpu_gemm
,
op
::
dot
>
();
apply_map
[
"dot"
]
=
extend_op
<
cpu_gemm
,
op
::
dot
>
();
apply_map
[
"batch_norm_inference"
]
=
apply_map
[
"batch_norm_inference"
]
=
extend_op
<
cpu_batch_norm_inference
,
op
::
batch_norm_inference
>
();
extend_op
<
cpu_batch_norm_inference
,
op
::
batch_norm_inference
>
();
apply_map
[
"
lrn
"
]
=
extend_op
<
cpu_
lrn
,
op
::
lrn
>
();
apply_map
[
"
LRN
"
]
=
extend_op
<
cpu_
LRN
,
op
::
LRN
>
();
apply_map
[
"contiguous"
]
=
extend_op
<
cpu_contiguous
,
op
::
contiguous
>
();
apply_map
[
"contiguous"
]
=
extend_op
<
cpu_contiguous
,
op
::
contiguous
>
();
apply_map
[
"concat"
]
=
extend_op
<
cpu_concat
,
op
::
concat
>
();
apply_map
[
"concat"
]
=
extend_op
<
cpu_concat
,
op
::
concat
>
();
apply_map
[
"leaky_relu"
]
=
extend_op
<
cpu_unary
<
leaky_relu_op
>
,
op
::
leaky_relu
>
();
apply_map
[
"leaky_relu"
]
=
extend_op
<
cpu_unary
<
leaky_relu_op
>
,
op
::
leaky_relu
>
();
...
...
src/targets/gpu/CMakeLists.txt
View file @
5168b178
...
@@ -42,6 +42,7 @@ add_library(migraph_gpu
...
@@ -42,6 +42,7 @@ add_library(migraph_gpu
batchnorm.cpp
batchnorm.cpp
write_literals.cpp
write_literals.cpp
rocblas.cpp
rocblas.cpp
LRN.cpp
)
)
set_target_properties
(
migraph_gpu PROPERTIES EXPORT_NAME gpu
)
set_target_properties
(
migraph_gpu PROPERTIES EXPORT_NAME gpu
)
rocm_clang_tidy_check
(
migraph_gpu
)
rocm_clang_tidy_check
(
migraph_gpu
)
...
...
src/targets/gpu/LRN.cpp
0 → 100644
View file @
5168b178
#include <migraph/gpu/LRN.hpp>
#include <migraph/operators.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/gpu/miopen.hpp>
#include <utility>

namespace migraph {
inline namespace MIGRAPH_INLINE_NS {
namespace gpu {

// Shape rule for the MIOpen-backed LRN op: two inputs are expected
// (the data tensor and the pre-allocated output buffer), neither
// broadcasted; the result takes the shape of the output buffer.
shape miopen_LRN::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).not_broadcasted();
    return inputs.at(1);
}

// Run the LRN forward pass through MIOpen. args[0] is the input
// tensor, args[1] is the output allocation, which is also returned.
argument miopen_LRN::compute(context& ctx,
                             const shape& output_shape,
                             const std::vector<argument>& args) const
{
    // MIOpen blend factors: y = alpha * op(x) + beta * y.
    float alpha = 1;
    float beta  = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    // do_backward=false with a null workspace: inference-only forward.
    miopenLRNForward(ctx.get_stream().get_miopen(),
                     ldesc.get(),
                     &alpha,
                     x_desc.get(),
                     args[0].implicit(),
                     &beta,
                     y_desc.get(),
                     args[1].implicit(),
                     false,
                     nullptr);
    return args[1];
}

} // namespace gpu
} // namespace MIGRAPH_INLINE_NS
} // namespace migraph
src/targets/gpu/include/migraph/gpu/
lrn
.hpp
→
src/targets/gpu/include/migraph/gpu/
LRN
.hpp
View file @
5168b178
...
@@ -22,10 +22,10 @@ namespace migraph {
...
@@ -22,10 +22,10 @@ namespace migraph {
inline
namespace
MIGRAPH_INLINE_NS
{
inline
namespace
MIGRAPH_INLINE_NS
{
namespace
gpu
{
namespace
gpu
{
struct
miopen_
lrn
struct
miopen_
LRN
{
{
shared
<
lrn
_descriptor
>
ld
;
shared
<
LRN
_descriptor
>
ld
esc
;
std
::
string
name
()
const
{
return
"gpu::
lrn
"
;
}
std
::
string
name
()
const
{
return
"gpu::
LRN
"
;
}
shape
compute_shape
(
const
std
::
vector
<
shape
>&
inputs
)
const
;
shape
compute_shape
(
const
std
::
vector
<
shape
>&
inputs
)
const
;
argument
argument
compute
(
context
&
ctx
,
const
shape
&
output_shape
,
const
std
::
vector
<
argument
>&
args
)
const
;
compute
(
context
&
ctx
,
const
shape
&
output_shape
,
const
std
::
vector
<
argument
>&
args
)
const
;
...
...
src/targets/gpu/include/migraph/gpu/miopen.hpp
View file @
5168b178
...
@@ -22,7 +22,7 @@ using activation_descriptor = MIGRAPH_MANAGE_PTR(miopenActivationDescriptor_t,
...
@@ -22,7 +22,7 @@ using activation_descriptor = MIGRAPH_MANAGE_PTR(miopenActivationDescriptor_t,
using
fusion_plan_descriptor
=
MIGRAPH_MANAGE_PTR
(
miopenFusionPlanDescriptor_t
,
using
fusion_plan_descriptor
=
MIGRAPH_MANAGE_PTR
(
miopenFusionPlanDescriptor_t
,
miopenDestroyFusionPlan
);
miopenDestroyFusionPlan
);
using
fused_operator_args
=
MIGRAPH_MANAGE_PTR
(
miopenOperatorArgs_t
,
miopenDestroyOperatorArgs
);
using
fused_operator_args
=
MIGRAPH_MANAGE_PTR
(
miopenOperatorArgs_t
,
miopenDestroyOperatorArgs
);
using
lrn
_descriptor
=
MIGRAPH_MANAGE_PTR
(
miopenLRNDescriptor_t
,
miopenDestroyLRNDescriptor
);
using
LRN
_descriptor
=
MIGRAPH_MANAGE_PTR
(
miopenLRNDescriptor_t
,
miopenDestroyLRNDescriptor
);
template
<
class
Result
,
class
F
,
class
...
Ts
>
template
<
class
Result
,
class
F
,
class
...
Ts
>
Result
make_obj
(
F
f
,
Ts
...
xs
)
Result
make_obj
(
F
f
,
Ts
...
xs
)
...
@@ -85,6 +85,18 @@ inline pooling_descriptor make_pooling(const migraph::op::pooling& op)
...
@@ -85,6 +85,18 @@ inline pooling_descriptor make_pooling(const migraph::op::pooling& op)
return
p
;
return
p
;
}
}
// Build a managed MIOpen LRN descriptor from the operator's attributes.
// Cross-channel mode matches the ONNX/Caffe LRN definition.
inline LRN_descriptor make_LRN(const migraph::op::LRN& op)
{
    auto ldesc = make_obj<LRN_descriptor>(&miopenCreateLRNDescriptor);
    miopenSetLRNDescriptor(
        ldesc.get(), miopenLRNCrossChannel, op.size, op.alpha, op.beta, op.bias);
    return ldesc;
}
inline
activation_descriptor
make_relu
()
inline
activation_descriptor
make_relu
()
{
{
auto
ad
=
make_obj
<
activation_descriptor
>
(
&
miopenCreateActivationDescriptor
);
auto
ad
=
make_obj
<
activation_descriptor
>
(
&
miopenCreateActivationDescriptor
);
...
...
src/targets/gpu/lowering.cpp
View file @
5168b178
...
@@ -15,6 +15,7 @@
...
@@ -15,6 +15,7 @@
#include <migraph/gpu/context.hpp>
#include <migraph/gpu/context.hpp>
#include <migraph/gpu/convolution.hpp>
#include <migraph/gpu/convolution.hpp>
#include <migraph/gpu/contiguous.hpp>
#include <migraph/gpu/contiguous.hpp>
#include <migraph/gpu/LRN.hpp>
#include <migraph/gpu/relu.hpp>
#include <migraph/gpu/relu.hpp>
#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/gpu/leaky_relu.hpp>
#include <migraph/gpu/softmax.hpp>
#include <migraph/gpu/softmax.hpp>
...
@@ -63,6 +64,10 @@ struct miopen_apply
...
@@ -63,6 +64,10 @@ struct miopen_apply
{
{
check_shape
(
s
,
apply_pooling
(
it
));
check_shape
(
s
,
apply_pooling
(
it
));
}
}
else
if
(
it
->
name
()
==
"LRN"
)
{
check_shape
(
s
,
apply_LRN
(
it
));
}
else
if
(
it
->
name
()
==
"add"
)
else
if
(
it
->
name
()
==
"add"
)
{
{
check_shape
(
s
,
apply_add
(
it
));
check_shape
(
s
,
apply_add
(
it
));
...
@@ -132,6 +137,14 @@ struct miopen_apply
...
@@ -132,6 +137,14 @@ struct miopen_apply
ins
,
miopen_pooling
{
op
,
std
::
move
(
pd
)},
ins
->
inputs
().
at
(
0
),
output
);
ins
,
miopen_pooling
{
op
,
std
::
move
(
pd
)},
ins
->
inputs
().
at
(
0
),
output
);
}
}
// Lower an op::LRN instruction to the MIOpen-backed gpu op:
// build the descriptor, allocate the output buffer explicitly,
// and rewrite the instruction in place.
instruction_ref apply_LRN(instruction_ref ins)
{
    auto&& op   = any_cast<op::LRN>(ins->get_operator());
    auto ldesc  = make_LRN(op);
    auto output = insert_allocation(ins, ins->get_shape());
    return prog->replace_instruction(
        ins, miopen_LRN{std::move(ldesc)}, ins->inputs().at(0), output);
}
instruction_ref
apply_relu
(
instruction_ref
ins
)
instruction_ref
apply_relu
(
instruction_ref
ins
)
{
{
auto
ad
=
make_relu
();
auto
ad
=
make_relu
();
...
...
test/cpu_ops_test.cpp
View file @
5168b178
...
@@ -579,6 +579,21 @@ TEST_CASE(leaky_relu_test)
...
@@ -579,6 +579,21 @@ TEST_CASE(leaky_relu_test)
EXPECT
(
migraph
::
verify_range
(
results_vector
,
gold
));
EXPECT
(
migraph
::
verify_range
(
results_vector
,
gold
));
}
}
// CPU LRN against hand-computed values for a 1x5x1x1 tensor
// (alpha=0.0001, beta=0.75, bias=1, size=5). With H=W=1 the window
// runs purely across channels.
TEST_CASE(LRN_test)
{
    migraph::program prog;
    migraph::shape in_shape{migraph::shape::float_type, {1, 5, 1, 1}};
    auto input =
        prog.add_literal(migraph::literal{in_shape, {-2.0f, 1.0f, 0.f, 1.0f, 2.0f}});
    prog.add_instruction(migraph::op::LRN{0.0001, 0.75, 1, 5}, input);
    prog.compile(migraph::cpu::target{});
    auto result = prog.eval({});
    std::vector<float> results_vector(5);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    // Each entry is x / (bias + alpha/size * sum of squares over the
    // clamped inclusive window [c-2, c+2])^beta.
    std::vector<float> gold = {
        -2 / 1.000075, 1 / 1.00009, 0 / 1.000145, 1 / 1.00009, 2 / 1.000075};
    EXPECT(migraph::verify_range(results_vector, gold));
}
TEST_CASE
(
imagescaler_test
)
TEST_CASE
(
imagescaler_test
)
{
{
migraph
::
program
p
;
migraph
::
program
p
;
...
...
test/gpu/miopen.cpp
View file @
5168b178
...
@@ -450,6 +450,17 @@ struct test_leaky_relu
...
@@ -450,6 +450,17 @@ struct test_leaky_relu
}
}
};
};
// Cross-checks GPU LRN against the reference implementation on a
// 1x5x2x2 input (driven by verify_program in main).
struct test_LRN
{
    migraph::program create_program() const
    {
        migraph::program p;
        auto x = p.add_parameter(
            "x", migraph::shape{migraph::shape::float_type, {1, 5, 2, 2}});
        p.add_instruction(migraph::op::LRN{0.0001, 0.75, 1.0, 5}, x);
        return p;
    }
};
struct
test_conv_pooling
struct
test_conv_pooling
{
{
migraph
::
program
create_program
()
const
migraph
::
program
create_program
()
const
...
@@ -829,6 +840,7 @@ struct test_conv_bn_relu_pooling2
...
@@ -829,6 +840,7 @@ struct test_conv_bn_relu_pooling2
int
main
()
int
main
()
{
{
verify_program
<
test_LRN
>
();
verify_program
<
test_concat
>
();
verify_program
<
test_concat
>
();
verify_program
<
test_concat2
>
();
verify_program
<
test_concat2
>
();
verify_program
<
test_concat_relu
>
();
verify_program
<
test_concat_relu
>
();
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment