gaoqiong / MIGraphX · Commits

Commit 42a97dfb (unverified)
Authored Aug 31, 2020 by Shucai Xiao; committed by GitHub on Aug 31, 2020
Not reflect actv desc (#624)
* Do not reflect the activation descriptor for some MIOpen operators
* clang-format
Parent 69925294
Showing 10 changed files with 41 additions and 54 deletions (+41, −54)
src/targets/gpu/abs.cpp                              +2   −0
src/targets/gpu/elu.cpp                              +5   −0
src/targets/gpu/include/migraphx/gpu/abs.hpp         +5   −1
src/targets/gpu/include/migraphx/gpu/elu.hpp         +5   −1
src/targets/gpu/include/migraphx/gpu/leaky_relu.hpp  +5   −1
src/targets/gpu/include/migraphx/gpu/lrn.hpp         +4   −1
src/targets/gpu/leaky_relu.cpp                       +6   −0
src/targets/gpu/lowering.cpp                         +4   −39
src/targets/gpu/lrn.cpp                              +5   −0
test/py/onnx_backend_test.py                         +0   −11
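Taken together, the diffs below apply one pattern: each MIOpen-backed operator (gpu::abs, gpu::elu, gpu::leaky_relu, gpu::lrn) now stores the framework-level op (op::abs, op::elu, op::leaky_relu, op::lrn) and reflects that, while the MIOpen activation/LRN descriptor is recreated in a new finalize() step rather than being reflected. The following is a minimal, self-contained sketch of that pattern; fake_descriptor, leaky_relu_op, and gpu_leaky_relu are stand-in types for illustration only, not MIGraphX classes.

// Minimal, self-contained sketch of the pattern this commit applies:
// reflect only the portable op attributes, rebuild the backend descriptor
// in finalize(). All types here are hypothetical stand-ins.
#include <iostream>
#include <memory>
#include <string>

// Stand-in for a backend handle such as a MIOpen activation descriptor.
struct fake_descriptor
{
    float alpha;
};

// Portable operator attributes: only these take part in reflection.
struct leaky_relu_op
{
    float alpha = 0.01f;
};

struct gpu_leaky_relu
{
    leaky_relu_op op;                      // reflected (serialized, printed, compared)
    std::shared_ptr<fake_descriptor> desc; // backend state, rebuilt on demand

    // Reflect only the portable attributes, mirroring
    // `return migraphx::reflect(self.op, f);` in the headers below.
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return f(self.op.alpha, "alpha");
    }

    // finalize() recreates the backend descriptor from the reflected op,
    // mirroring `ad = make_leaky_relu(op.alpha);` in leaky_relu.cpp below.
    void finalize() { desc = std::make_shared<fake_descriptor>(fake_descriptor{op.alpha}); }
};

int main()
{
    gpu_leaky_relu lrelu;
    lrelu.op.alpha = 0.2f;

    // Only the portable attribute is visible to reflection.
    gpu_leaky_relu::reflect(lrelu, [](float value, const std::string& name) {
        std::cout << name << " = " << value << "\n";
    });

    // The backend descriptor is built afterwards, never serialized.
    lrelu.finalize();
    std::cout << "descriptor alpha: " << lrelu.desc->alpha << "\n";
}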
src/targets/gpu/abs.cpp

@@ -31,6 +31,8 @@ argument miopen_abs::compute(context& ctx,
     return args[1];
 }
 
+void miopen_abs::finalize(context&, const shape&, const std::vector<shape>&) { ad = make_abs(); }
+
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
src/targets/gpu/elu.cpp

@@ -31,6 +31,11 @@ argument miopen_elu::compute(context& ctx,
     return args[1];
 }
 
+void miopen_elu::finalize(context&, const shape&, const std::vector<shape>&)
+{
+    ad = make_elu(op.alpha);
+}
+
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
src/targets/gpu/include/migraphx/gpu/abs.hpp

@@ -2,7 +2,9 @@
 #define MIGRAPHX_GUARD_RTGLIB_ABS_HPP
 
 #include <migraphx/shape.hpp>
+#include <migraphx/reflect.hpp>
 #include <migraphx/gpu/miopen.hpp>
+#include <migraphx/op/abs.hpp>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -12,18 +14,20 @@ struct context;
 
 struct miopen_abs
 {
+    op::abs op;
     shared<activation_descriptor> ad;
 
     template <class Self, class F>
     static auto reflect(Self& self, F f)
     {
-        return gpu::reflect(self.ad.get(), f);
+        return migraphx::reflect(self.op, f);
     }
 
     std::string name() const { return "gpu::abs"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    void finalize(context&, const shape&, const std::vector<shape>&);
     std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
     {
         return shapes.size() - 1;
src/targets/gpu/include/migraphx/gpu/elu.hpp

 #ifndef MIGRAPHX_GUARD_RTGLIB_ELU_HPP
 #define MIGRAPHX_GUARD_RTGLIB_ELU_HPP
 
+#include <migraphx/op/elu.hpp>
 #include <migraphx/shape.hpp>
+#include <migraphx/reflect.hpp>
 #include <migraphx/gpu/miopen.hpp>
 
 namespace migraphx {
@@ -12,18 +14,20 @@ struct context;
 
 struct miopen_elu
 {
+    op::elu op;
     shared<activation_descriptor> ad;
 
     template <class Self, class F>
     static auto reflect(Self& self, F f)
     {
-        return gpu::reflect(self.ad.get(), f);
+        return migraphx::reflect(self.op, f);
     }
 
     std::string name() const { return "gpu::elu"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    void finalize(context&, const shape&, const std::vector<shape>&);
     std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
     {
         return shapes.size() - 1;
src/targets/gpu/include/migraphx/gpu/leaky_relu.hpp

 #ifndef MIGRAPHX_GUARD_RTGLIB_LEAKY_RELU_HPP
 #define MIGRAPHX_GUARD_RTGLIB_LEAKY_RELU_HPP
 
+#include <migraphx/op/leaky_relu.hpp>
 #include <migraphx/shape.hpp>
+#include <migraphx/reflect.hpp>
 #include <migraphx/gpu/miopen.hpp>
 
 namespace migraphx {
@@ -12,18 +14,20 @@ struct context;
 
 struct miopen_leaky_relu
 {
+    op::leaky_relu op;
     shared<activation_descriptor> ad;
 
     template <class Self, class F>
     static auto reflect(Self& self, F f)
    {
-        return gpu::reflect(self.ad.get(), f);
+        return migraphx::reflect(self.op, f);
    }
 
     std::string name() const { return "gpu::leaky_relu"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    void finalize(context&, const shape&, const std::vector<shape>&);
     std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
     {
         return shapes.size() - 1;
src/targets/gpu/include/migraphx/gpu/lrn.hpp

@@ -2,6 +2,7 @@
 #define MIGRAPHX_GUARD_RTGLIB_LRN_HPP
 
 #include <migraphx/shape.hpp>
+#include <migraphx/reflect.hpp>
 #include <migraphx/gpu/miopen.hpp>
 
 namespace migraphx {
@@ -12,18 +13,20 @@ struct context;
 
 struct miopen_lrn
 {
+    op::lrn op;
     shared<lrn_descriptor> ldesc;
 
     template <class Self, class F>
     static auto reflect(Self& self, F f)
     {
-        return gpu::reflect(self.ldesc.get(), f);
+        return migraphx::reflect(self.op, f);
     }
 
     std::string name() const { return "gpu::lrn"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    void finalize(context&, const shape&, const std::vector<shape>&);
     std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
     {
         return shapes.size() - 1;
src/targets/gpu/leaky_relu.cpp
View file @
42a97dfb
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/miopen.hpp>
namespace
migraphx
{
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
inline
namespace
MIGRAPHX_INLINE_NS
{
...
@@ -31,6 +32,11 @@ argument miopen_leaky_relu::compute(context& ctx,
...
@@ -31,6 +32,11 @@ argument miopen_leaky_relu::compute(context& ctx,
return
args
[
1
];
return
args
[
1
];
}
}
void
miopen_leaky_relu
::
finalize
(
context
&
,
const
shape
&
,
const
std
::
vector
<
shape
>&
)
{
ad
=
make_leaky_relu
(
op
.
alpha
);
}
}
// namespace gpu
}
// namespace gpu
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace migraphx
}
// namespace migraphx
src/targets/gpu/lowering.cpp

@@ -91,11 +91,6 @@ struct miopen_apply
         create_output_names();
 
-        add_miopen_simple_op<miopen_abs>("abs", make_abs);
-
-        add_miopen_extend_op<miopen_leaky_relu, op::leaky_relu>("leaky_relu", make_leaky_relu);
-        add_miopen_extend_op<miopen_elu, op::elu>("elu", make_elu);
-
         add_generic_op("acos");
         add_generic_op("acosh");
         add_generic_op("add");
@@ -132,13 +127,17 @@ struct miopen_apply
         add_generic_op("tan");
         add_generic_op("tanh");
 
+        add_extend_op("abs");
         add_extend_op("argmax");
         add_extend_op("argmin");
         add_extend_op("clip");
         add_extend_op("concat");
         add_extend_op("convert");
+        add_extend_op("elu");
         add_extend_op("gather");
+        add_extend_op("leaky_relu");
         add_extend_op("logsoftmax");
+        add_extend_op("lrn");
         add_extend_op("pad");
         add_extend_op("reduce_max");
         add_extend_op("reduce_mean");
@@ -152,7 +151,6 @@ struct miopen_apply
         add_gemm_op<op::dot>("dot");
         add_gemm_op<op::quant_dot>("quant_dot");
-        add_lrn_op();
         add_convolution_op();
         add_deconvolution_op();
         add_quant_convolution_op();
@@ -327,17 +325,6 @@ struct miopen_apply
         });
     }
 
-    void add_lrn_op()
-    {
-        apply_map.emplace("lrn", [=](instruction_ref ins) {
-            auto&& op   = any_cast<op::lrn>(ins->get_operator());
-            auto ldesc  = make_lrn(op);
-            auto output = insert_allocation(ins, ins->get_shape());
-            return prog->replace_instruction(
-                ins, miopen_lrn{std::move(ldesc)}, ins->inputs().at(0), output);
-        });
-    }
-
     void add_generic_op(const std::string& name) { add_generic_op(name, "gpu::" + name); }
 
     void add_generic_op(const std::string& op_name, const std::string& gpu_name)
@@ -365,28 +352,6 @@ struct miopen_apply
         });
     }
 
-    template <class T, class Op, class F>
-    void add_miopen_extend_op(std::string name, F f)
-    {
-        apply_map.emplace(name, [=](instruction_ref ins) {
-            auto&& op   = any_cast<Op>(ins->get_operator());
-            auto ad     = f(op.alpha);
-            auto output = insert_allocation(ins, ins->get_shape());
-            return prog->replace_instruction(
-                ins, T{std::move(ad)}, ins->inputs().at(0), output);
-        });
-    }
-
-    template <class T, class F>
-    void add_miopen_simple_op(std::string name, F f)
-    {
-        apply_map.emplace(name, [=](instruction_ref ins) {
-            auto ad     = f();
-            auto output = insert_allocation(ins, ins->get_shape());
-            return prog->replace_instruction(
-                ins, T{std::move(ad)}, ins->inputs().at(0), output);
-        });
-    }
-
     void add_batch_norm_inference_op()
     {
         apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) {
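With descriptor construction moved into finalize(), the lowering pass no longer needs the MIOpen-specific registration helpers removed above; abs, elu, leaky_relu, and lrn now go through the same add_extend_op path as the other extended operators. The fragment below is only a hedged sketch of what such a registration plausibly looks like inside miopen_apply, modeled on the removed helpers; add_extend_op's real definition is not part of this diff, and add_extend_op_sketch is a hypothetical name.

// Hypothetical member of miopen_apply (assumed, not shown in this diff),
// modeled on the removed add_miopen_extend_op/add_miopen_simple_op helpers.
// Key difference: the GPU wrapper T is constructed from the op itself, and
// its MIOpen descriptor is created later in T::finalize().
template <class T, class Op>
void add_extend_op_sketch(const std::string& name)
{
    apply_map.emplace(name, [=](instruction_ref ins) {
        // Recover the framework-level op carried by the instruction.
        auto&& op   = any_cast<Op>(ins->get_operator());
        // Allocate the output buffer, as the removed helpers did.
        auto output = insert_allocation(ins, ins->get_shape());
        // No make_abs()/make_elu()/make_leaky_relu() call here any more; the
        // descriptor is rebuilt when the program is finalized.
        return prog->replace_instruction(ins, T{op}, ins->inputs().at(0), output);
    });
}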
src/targets/gpu/lrn.cpp

@@ -33,6 +33,11 @@ argument miopen_lrn::compute(context& ctx,
     return args[1];
 }
 
+void miopen_lrn::finalize(context&, const shape&, const std::vector<shape>&)
+{
+    ldesc = make_lrn(op);
+}
+
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
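For LRN the rebuilt descriptor depends on more than a single alpha, so finalize() hands the whole op to make_lrn, as shown above. Below is a self-contained sketch of that multi-attribute variant; lrn_op, fake_lrn_descriptor, and make_lrn_sketch are stand-ins, and the field names are illustrative rather than op::lrn's exact members.

// Self-contained sketch of the multi-attribute case handled by
// `ldesc = make_lrn(op);` above. All types and field names are hypothetical.
#include <cstddef>
#include <iostream>
#include <memory>

struct lrn_op // illustrative local-response-normalization attributes
{
    float alpha      = 1e-4f;
    float beta       = 0.75f;
    float bias       = 1.0f;
    std::size_t size = 5;
};

struct fake_lrn_descriptor
{
    lrn_op settings;
};

// Analogue of make_lrn(op): build the backend descriptor from every attribute.
std::shared_ptr<fake_lrn_descriptor> make_lrn_sketch(const lrn_op& op)
{
    return std::make_shared<fake_lrn_descriptor>(fake_lrn_descriptor{op});
}

struct gpu_lrn
{
    lrn_op op;                                  // reflected attributes
    std::shared_ptr<fake_lrn_descriptor> ldesc; // rebuilt in finalize()

    void finalize() { ldesc = make_lrn_sketch(op); }
};

int main()
{
    gpu_lrn lrn;
    lrn.op.size = 3;
    lrn.finalize();
    std::cout << "lrn size: " << lrn.ldesc->settings.size << "\n";
}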
test/py/onnx_backend_test.py

@@ -218,11 +218,6 @@ def create_backend_test(testname=None, target_device=None):
     backend_test.exclude(r'test_depthtospace_example_cpu')
     backend_test.exclude(r'test_expand_dim_changed_cpu')
     backend_test.exclude(r'test_expand_dim_unchanged_cpu')
-    backend_test.exclude(r'test_gather_0_cpu')
-    backend_test.exclude(r'test_gather_1_cpu')
-    backend_test.exclude(r'test_gather_elements_0_cpu')
-    backend_test.exclude(r'test_gather_elements_1_cpu')
-    backend_test.exclude(r'test_gather_elements_negative_indices_cpu')
     backend_test.exclude(r'test_gathernd_example_float32_cpu')
     backend_test.exclude(r'test_gathernd_example_int32_batch_dim1_cpu')
     backend_test.exclude(r'test_gathernd_example_int32_cpu')
@@ -241,16 +236,10 @@ def create_backend_test(testname=None, target_device=None):
     backend_test.exclude(r'test_less_equal_bcast_expanded_cpu')
     backend_test.exclude(r'test_less_equal_cpu')
     backend_test.exclude(r'test_less_equal_expanded_cpu')
-    backend_test.exclude(r'test_max_float16_cpu')
-    backend_test.exclude(r'test_max_int64_cpu')
-    backend_test.exclude(r'test_max_uint64_cpu')
     backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
     backend_test.exclude(r'test_mean_example_cpu')
     backend_test.exclude(r'test_mean_one_input_cpu')
     backend_test.exclude(r'test_mean_two_inputs_cpu')
-    backend_test.exclude(r'test_min_float16_cpu')
-    backend_test.exclude(r'test_min_int64_cpu')
-    backend_test.exclude(r'test_min_uint64_cpu')
     backend_test.exclude(r'test_negative_log_likelihood_loss_*')
     backend_test.exclude(r'test_not_2d_cpu')
     backend_test.exclude(r'test_not_3d_cpu')