Project: gaoqiong / MIGraphX

Commit e861793a, authored Nov 13, 2018 by Khalique
parent a79ab4d7

    added onnx parsing

Showing 10 changed files with 50 additions and 29 deletions (+50 -29)
src/include/migraph/operators.hpp               +3  -3
src/onnx/onnx.cpp                               +21 -0
src/targets/cpu/lowering.cpp                    +4  -4
src/targets/gpu/CMakeLists.txt                  +1  -1
src/targets/gpu/include/migraph/gpu/lrn.hpp     +3  -3
src/targets/gpu/include/migraph/gpu/miopen.hpp  +3  -3
src/targets/gpu/lowering.cpp                    +7  -7
src/targets/gpu/lrn.cpp                         +3  -3
test/cpu_ops_test.cpp                           +2  -2
test/gpu/miopen.cpp                             +3  -3
src/include/migraph/operators.hpp

@@ -51,13 +51,13 @@ struct batch_norm_inference
     }
 };
-struct LRN
+struct lrn
 {
     float alpha = 0.0001;
     float beta  = 0.75;
     float bias  = 1.0;
     int size;
-    std::string name() const { return "LRN"; }
+    std::string name() const { return "lrn"; }
     shape compute_shape(std::vector<shape> inputs) const
     {
@@ -65,7 +65,7 @@ struct LRN
         return inputs.front();
     }
-    friend std::ostream& operator<<(std::ostream& os, const LRN& op)
+    friend std::ostream& operator<<(std::ostream& os, const lrn& op)
    {
        os << op.name() << ":" << op.alpha << ":" << op.beta << ":" << op.bias << ":" << op.size;
        return os;
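The defaults above (alpha = 0.0001, beta = 0.75, bias = 1.0) match the ONNX LocalResponseNormalization defaults, and size is the cross-channel window width. For reference, a minimal standalone sketch of that computation for an NCHW tensor, following the ONNX formula y[c] = x[c] / (bias + (alpha / size) * sum over the window of x[j]^2)^beta, could look like the following (function and variable names are illustrative only, not MIGraphX code):

// Reference cross-channel LRN (illustrative sketch, not the MIGraphX CPU kernel).
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<float> lrn_reference(const std::vector<float>& x, // NCHW layout with N = 1
                                 int channels,
                                 float alpha = 0.0001f,
                                 float beta  = 0.75f,
                                 float bias  = 1.0f,
                                 int size    = 1)
{
    std::vector<float> y(x.size());
    const int per_channel = static_cast<int>(x.size()) / channels;
    for(int c = 0; c < channels; c++)
    {
        // ONNX window: [c - floor((size-1)/2), c + ceil((size-1)/2)], clamped to valid channels
        const int start = std::max(0, c - (size - 1) / 2);
        const int end   = std::min(channels - 1, c + size / 2);
        for(int i = 0; i < per_channel; i++)
        {
            float sq = 0.0f;
            for(int j = start; j <= end; j++)
                sq += x[j * per_channel + i] * x[j * per_channel + i];
            const float scale      = std::pow(bias + (alpha / size) * sq, beta);
            y[c * per_channel + i] = x[c * per_channel + i] / scale;
        }
    }
    return y;
}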
src/onnx/onnx.cpp

@@ -59,6 +59,7 @@ struct onnx_parser
         // disable dropout for inference
         add_generic_op("Dropout", op::identity{});
+        add_mem_op("LRN", &onnx_parser::parse_lrn);
         add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
         add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
         add_mem_op("Constant", &onnx_parser::parse_constant);
@@ -336,6 +337,26 @@ struct onnx_parser
         return prog.add_instruction(op, args.front());
     }
+
+    instruction_ref
+    parse_lrn(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
+    {
+        float alpha = 0.0001;
+        float beta  = 0.75;
+        float bias  = 1.0;
+        int size    = 1;
+        if(contains(attributes, "alpha"))
+            alpha = parse_value(attributes.at("alpha")).at<float>();
+        if(contains(attributes, "beta"))
+            beta = parse_value(attributes.at("beta")).at<float>();
+        if(contains(attributes, "bias"))
+            bias = parse_value(attributes.at("bias")).at<float>();
+        if(contains(attributes, "size"))
+            size = parse_value(attributes.at("size")).at<int>();
+        op::lrn op{alpha, beta, bias, size};
+        return prog.add_instruction(op, args.front());
+    }
+
     instruction_ref parse_imagescaler(const std::string&,
                                       attribute_map attributes,
                                       std::vector<instruction_ref> args)
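With "LRN" registered in the parser's dispatch table, an ONNX graph containing an LRN node should now be lowered to op::lrn instead of hitting the unknown-operator path. A minimal usage sketch, assuming the parse_onnx entry point this file implements and its public header (header name and model path here are assumptions, not taken from this commit):

#include <migraph/onnx.hpp> // assumed header exposing parse_onnx
#include <iostream>

int main()
{
    // "model.onnx" is a placeholder; any model containing an LRN node will do.
    migraph::program p = migraph::parse_onnx("model.onnx");
    std::cout << p << std::endl; // the printed program should now include an lrn instruction
}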
src/targets/cpu/lowering.cpp

@@ -94,11 +94,11 @@ struct cpu_batch_norm_inference
     }
 };
-struct cpu_LRN
+struct cpu_lrn
 {
-    op::LRN op;
-    std::string name() const { return "cpu::LRN"; }
+    op::lrn op;
+    std::string name() const { return "cpu::lrn"; }
     shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
     argument compute(context&, shape output_shape, std::vector<argument> args) const
     {
@@ -633,7 +633,7 @@ struct cpu_apply
         apply_map["dot"] = extend_op<cpu_gemm, op::dot>();
         apply_map["batch_norm_inference"] =
             extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
-        apply_map["LRN"]        = extend_op<cpu_LRN, op::LRN>();
+        apply_map["lrn"]        = extend_op<cpu_lrn, op::lrn>();
         apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
         apply_map["concat"]     = extend_op<cpu_concat, op::concat>();
         apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
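The apply_map key must match the string returned by the operator's name(), which is why the rename touches both the wrapper struct and the lookup key. A toy sketch of this dispatch-table pattern (types and names below are simplified stand-ins, not MIGraphX's actual lowering machinery):

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

int main()
{
    // Lowering table keyed by operator name, in the spirit of cpu_apply's apply_map.
    std::unordered_map<std::string, std::function<void()>> apply_map;
    apply_map["lrn"] = [] { std::cout << "replace op::lrn with cpu_lrn\n"; };

    const std::string op_name = "lrn"; // in MIGraphX this would come from the op's name()
    auto it = apply_map.find(op_name);
    if(it != apply_map.end())
        it->second(); // dispatch: a stale "LRN" key would simply never match here
}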
src/targets/gpu/CMakeLists.txt

@@ -42,7 +42,7 @@ add_library(migraph_gpu
     batchnorm.cpp
     write_literals.cpp
     rocblas.cpp
-    LRN.cpp
+    lrn.cpp
 )
 set_target_properties(migraph_gpu PROPERTIES EXPORT_NAME gpu)
 rocm_clang_tidy_check(migraph_gpu)
src/targets/gpu/include/migraph/gpu/LRN.hpp → src/targets/gpu/include/migraph/gpu/lrn.hpp

@@ -22,10 +22,10 @@ namespace migraph {
 inline namespace MIGRAPH_INLINE_NS {
 namespace gpu {
-struct miopen_LRN
+struct miopen_lrn
 {
-    shared<LRN_descriptor> ldesc;
-    std::string name() const { return "gpu::LRN"; }
+    shared<lrn_descriptor> ldesc;
+    std::string name() const { return "gpu::lrn"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument compute(context& ctx,
                      const shape& output_shape,
                      const std::vector<argument>& args) const;
src/targets/gpu/include/migraph/gpu/miopen.hpp

@@ -22,7 +22,7 @@ using activation_descriptor = MIGRAPH_MANAGE_PTR(miopenActivationDescriptor_t,
 using fusion_plan_descriptor = MIGRAPH_MANAGE_PTR(miopenFusionPlanDescriptor_t, miopenDestroyFusionPlan);
 using fused_operator_args    = MIGRAPH_MANAGE_PTR(miopenOperatorArgs_t, miopenDestroyOperatorArgs);
-using LRN_descriptor = MIGRAPH_MANAGE_PTR(miopenLRNDescriptor_t, miopenDestroyLRNDescriptor);
+using lrn_descriptor = MIGRAPH_MANAGE_PTR(miopenLRNDescriptor_t, miopenDestroyLRNDescriptor);

 template <class Result, class F, class... Ts>
 Result make_obj(F f, Ts... xs)
@@ -85,9 +85,9 @@ inline pooling_descriptor make_pooling(const migraph::op::pooling& op)
     return p;
 }
-inline LRN_descriptor make_LRN(const migraph::op::LRN& op)
+inline lrn_descriptor make_lrn(const migraph::op::lrn& op)
 {
-    auto ldesc = make_obj<LRN_descriptor>(&miopenCreateLRNDescriptor);
+    auto ldesc = make_obj<lrn_descriptor>(&miopenCreateLRNDescriptor);
     miopenSetLRNDescriptor(ldesc.get(), miopenLRNCrossChannel, op.size, op.alpha, op.beta, op.bias);
     return ldesc;
 }
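MIGRAPH_MANAGE_PTR wraps an opaque MIOpen handle in an RAII smart pointer so that the matching miopenDestroy* call runs automatically; lrn_descriptor is simply the renamed alias. A self-contained sketch of that idiom, using a made-up handle type in place of MIOpen's (none of the names below exist in MIGraphX or MIOpen):

#include <memory>

// Stand-in for an opaque C handle such as miopenLRNDescriptor_t.
struct fake_descriptor
{
};
using fake_descriptor_t = fake_descriptor*;

inline void fake_create(fake_descriptor_t* d) { *d = new fake_descriptor{}; }
inline void fake_destroy(fake_descriptor_t d) { delete d; }

// Morally what MIGRAPH_MANAGE_PTR(fake_descriptor_t, fake_destroy) provides:
// a unique_ptr whose deleter calls the C-style destroy function.
struct fake_deleter
{
    void operator()(fake_descriptor_t d) const { fake_destroy(d); }
};
using managed_descriptor = std::unique_ptr<fake_descriptor, fake_deleter>;

inline managed_descriptor make_descriptor()
{
    fake_descriptor_t raw = nullptr;
    fake_create(&raw);
    return managed_descriptor{raw}; // destroyed automatically when the owner goes out of scope
}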
src/targets/gpu/lowering.cpp

@@ -15,7 +15,7 @@
 #include <migraph/gpu/context.hpp>
 #include <migraph/gpu/convolution.hpp>
 #include <migraph/gpu/contiguous.hpp>
-#include <migraph/gpu/LRN.hpp>
+#include <migraph/gpu/lrn.hpp>
 #include <migraph/gpu/relu.hpp>
 #include <migraph/gpu/leaky_relu.hpp>
 #include <migraph/gpu/softmax.hpp>
@@ -64,9 +64,9 @@ struct miopen_apply
            {
                check_shape(s, apply_pooling(it));
            }
-            else if(it->name() == "LRN")
+            else if(it->name() == "lrn")
            {
-                check_shape(s, apply_LRN(it));
+                check_shape(s, apply_lrn(it));
            }
            else if(it->name() == "add")
            {
@@ -137,13 +137,13 @@ struct miopen_apply
            ins, miopen_pooling{op, std::move(pd)}, ins->inputs().at(0), output);
     }
-    instruction_ref apply_LRN(instruction_ref ins)
+    instruction_ref apply_lrn(instruction_ref ins)
     {
-        auto&& op  = any_cast<op::LRN>(ins->get_operator());
-        auto ldesc = make_LRN(op);
+        auto&& op  = any_cast<op::lrn>(ins->get_operator());
+        auto ldesc = make_lrn(op);
         auto output = insert_allocation(ins, ins->get_shape());
         return prog->replace_instruction(
-            ins, miopen_LRN{std::move(ldesc)}, ins->inputs().at(0), output);
+            ins, miopen_lrn{std::move(ldesc)}, ins->inputs().at(0), output);
     }
     instruction_ref apply_relu(instruction_ref ins)
src/targets/gpu/LRN.cpp → src/targets/gpu/lrn.cpp

-#include <migraph/gpu/LRN.hpp>
+#include <migraph/gpu/lrn.hpp>
 #include <migraph/operators.hpp>
 #include <migraph/manage_ptr.hpp>
 #include <migraph/gpu/miopen.hpp>
@@ -8,13 +8,13 @@ namespace migraph {
 inline namespace MIGRAPH_INLINE_NS {
 namespace gpu {
-shape miopen_LRN::compute_shape(const std::vector<shape>& inputs) const
+shape miopen_lrn::compute_shape(const std::vector<shape>& inputs) const
 {
     check_shapes{inputs, *this}.has(2).not_broadcasted();
     return inputs.at(1);
 }
-argument miopen_LRN::compute(context& ctx,
+argument miopen_lrn::compute(context& ctx,
                              const shape& output_shape,
                              const std::vector<argument>& args) const
 {
test/cpu_ops_test.cpp

@@ -579,12 +579,12 @@ TEST_CASE(leaky_relu_test)
     EXPECT(migraph::verify_range(results_vector, gold));
 }
-TEST_CASE(LRN_test)
+TEST_CASE(lrn_test)
 {
     migraph::program p;
     migraph::shape s{migraph::shape::float_type, {1, 5, 1, 1}};
     auto l = p.add_literal(migraph::literal{s, {-2.0f, 1.0f, 0.f, 1.0f, 2.0f}});
-    p.add_instruction(migraph::op::LRN{0.0001, 0.75, 1, 5}, l);
+    p.add_instruction(migraph::op::lrn{0.0001, 0.75, 1, 5}, l);
     p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(5);
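For orientation, plugging the test's parameters (alpha = 0.0001, beta = 0.75, bias = 1, size = 5) and input {-2, 1, 0, 1, 2} into the ONNX LRN formula gives per-channel squared-window sums of {5, 6, 10, 6, 5}, so each output is x_c / (1 + 0.00002 * sum_c)^0.75, roughly {-1.99985, 0.99991, 0, 0.99991, 1.99985}. The gold values the test compares against (not shown in this hunk) should be close to these, assuming the CPU kernel follows the ONNX definition.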
test/gpu/miopen.cpp

@@ -450,13 +450,13 @@ struct test_leaky_relu
     }
 };
-struct test_LRN
+struct test_lrn
 {
     migraph::program create_program() const
     {
         migraph::program p;
         auto x = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {1, 5, 2, 2}});
-        p.add_instruction(migraph::op::LRN{0.0001, 0.75, 1.0, 5}, x);
+        p.add_instruction(migraph::op::lrn{0.0001, 0.75, 1.0, 5}, x);
         return p;
     }
 };
@@ -840,7 +840,7 @@ struct test_conv_bn_relu_pooling2
 int main()
 {
-    verify_program<test_LRN>();
+    verify_program<test_lrn>();
     verify_program<test_concat>();
     verify_program<test_concat2>();
     verify_program<test_concat_relu>();