gaoqiong / MIGraphX / Commits

Commit 53ae18ea, authored Aug 14, 2019 by Khalique

manual merge

Parents: f773c763, fef8086c
Changes: 22
Showing 20 changed files on this page, with 233 additions and 141 deletions.
CMakeLists.txt                                          +3   -1
src/include/migraphx/op/sign.hpp                        +32  -0
src/include/migraphx/operators.hpp                      +1   -0
src/onnx/onnx.cpp                                       +1   -0
src/targets/gpu/CMakeLists.txt                          +3   -2
src/targets/gpu/device/relu.cpp                         +17  -0
src/targets/gpu/device/sign.cpp                         +18  -0
src/targets/gpu/device/tanh.cpp                         +18  -0
src/targets/gpu/include/migraphx/gpu/device/relu.hpp    +20  -0
src/targets/gpu/include/migraphx/gpu/device/sign.hpp    +20  -0
src/targets/gpu/include/migraphx/gpu/device/tanh.hpp    +20  -0
src/targets/gpu/include/migraphx/gpu/relu.hpp           +3   -19
src/targets/gpu/include/migraphx/gpu/sign.hpp           +19  -0
src/targets/gpu/include/migraphx/gpu/tanh.hpp           +3   -21
src/targets/gpu/lowering.cpp                            +4   -2
src/targets/gpu/relu.cpp                                +0   -36
src/targets/gpu/tanh.cpp                                +0   -36
src/tf/tf.cpp                                           +24  -24
test/cpu_ops_test.cpp                                   +15  -0
test/gpu/miopen.cpp                                     +12  -0
CMakeLists.txt

...
@@ -32,7 +32,9 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
    endif()
endif()
if(CMAKE_CXX_COMPILER MATCHES ".*hcc")
    include(CheckCXXCompilerFlag)
    check_cxx_compiler_flag("--cuda-host-only -x hip" HAS_HIP)
    if(HAS_HIP)
        message(STATUS "Enable miopen backend")
        set(MIGRAPHX_ENABLE_GPU On CACHE BOOL "")
    else()
...
src/include/migraphx/op/sign.hpp (new file, 0 → 100644)

#ifndef MIGRAPHX_GUARD_OPERATORS_SIGN_HPP
#define MIGRAPHX_GUARD_OPERATORS_SIGN_HPP

#include <array>
#include <migraphx/op/unary.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/streamutils.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <cmath>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {

struct sign : unary<sign>
{
    auto apply() const
    {
        return [](auto x) { return (x > 0 ? 1 : ((x < 0) ? -1 : 0)); };
    }
};

} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
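The new op::sign is an elementwise unary operator: apply() returns a lambda that maps positive inputs to 1, negative inputs to -1, and zero to 0. A minimal usage sketch on the CPU target, mirroring the sign_test added to test/cpu_ops_test.cpp later in this commit (the include paths below are assumptions, not shown in this diff):

// Sketch only: run op::sign through the program API on the CPU target.
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/cpu/target.hpp>

int main()
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {3}};
    auto l = p.add_literal(migraphx::literal{s, {-2.5, 0.0, 4.0}});
    p.add_instruction(migraphx::op::sign{}, l);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({}); // expected contents: -1, 0, 1
    (void)result;
    return 0;
}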
src/include/migraphx/operators.hpp

...
@@ -56,6 +56,7 @@
 #include <migraphx/op/rsqrt.hpp>
 #include <migraphx/op/scalar.hpp>
 #include <migraphx/op/sigmoid.hpp>
+#include <migraphx/op/sign.hpp>
 #include <migraphx/op/sinh.hpp>
 #include <migraphx/op/sin.hpp>
 #include <migraphx/op/slice.hpp>
...
src/onnx/onnx.cpp

...
@@ -55,6 +55,7 @@ struct onnx_parser
         add_generic_op("Acos", op::acos{});
         add_generic_op("Atan", op::atan{});
         add_generic_op("Sqrt", op::sqrt{});
+        add_generic_op("Sign", op::sign{});
         add_binary_op("Add", op::add{});
         add_binary_op("Div", op::div{});
...
src/targets/gpu/CMakeLists.txt

...
@@ -24,9 +24,11 @@ add_library(migraphx_device
     device/tan.cpp
     device/sinh.cpp
     device/cosh.cpp
+    device/tanh.cpp
     device/asin.cpp
     device/acos.cpp
     device/atan.cpp
+    device/relu.cpp
     device/add_relu.cpp
     device/contiguous.cpp
     device/logsoftmax.cpp
...
@@ -45,6 +47,7 @@ add_library(migraphx_device
     device/reduce_mean.cpp
     device/pow.cpp
     device/sqdiff.cpp
+    device/sign.cpp
 )
 set_target_properties(migraphx_device PROPERTIES EXPORT_NAME device)
 rocm_clang_tidy_check(migraphx_device)
...
@@ -67,9 +70,7 @@ add_library(migraphx_gpu
     logsoftmax.cpp
     contiguous.cpp
     concat.cpp
-    relu.cpp
     leaky_relu.cpp
-    tanh.cpp
     batchnorm.cpp
     write_literals.cpp
     rocblas.cpp
...
src/targets/gpu/device/relu.cpp (new file, 0 → 100644)

#include <migraphx/gpu/device/relu.hpp>
#include <migraphx/gpu/device/nary.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void relu(hipStream_t stream, const argument& result, const argument& arg)
{
    nary(stream, result, arg)([](auto x) { return std::max<decltype(x)>(0, x); });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
src/targets/gpu/device/sign.cpp (new file, 0 → 100644)

#include <migraphx/gpu/device/sign.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/types.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void sign(hipStream_t stream, const argument& result, const argument& arg)
{
    nary(stream, result, arg)([](auto x) { return (x > 0 ? 1 : ((x < 0) ? -1 : 0)); });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
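The nary() helper used in these device files is migraphx's pointwise launcher: it applies the given lambda to every element of arg and writes the results into result on the given HIP stream. As a plain host-side analogue (illustration only, not the actual HIP kernel), the per-element work for sign amounts to:

// Host-side reference of the elementwise sign computation; the GPU version
// does the same per-element work inside a kernel launched by nary().
#include <cstddef>
#include <vector>

std::vector<float> sign_reference(const std::vector<float>& in)
{
    std::vector<float> out(in.size());
    for(std::size_t i = 0; i < in.size(); i++)
        out[i] = (in[i] > 0 ? 1.0f : ((in[i] < 0) ? -1.0f : 0.0f));
    return out;
}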
src/targets/gpu/device/tanh.cpp (new file, 0 → 100644)

#include <migraphx/gpu/device/tanh.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/types.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void tanh(hipStream_t stream, const argument& result, const argument& arg)
{
    nary(stream, result, arg)([](auto x) { return ::tanh(to_hip_type(x)); });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
src/targets/gpu/include/migraphx/gpu/device/relu.hpp (new file, 0 → 100644)

#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_RELU_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_RELU_HPP

#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void relu(hipStream_t stream, const argument& result, const argument& arg);

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
src/targets/gpu/include/migraphx/gpu/device/sign.hpp (new file, 0 → 100644)

#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_SIGN_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_SIGN_HPP

#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void sign(hipStream_t stream, const argument& result, const argument& arg);

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
src/targets/gpu/include/migraphx/gpu/device/tanh.hpp (new file, 0 → 100644)

#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_TANH_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_TANH_HPP

#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void tanh(hipStream_t stream, const argument& result, const argument& arg);

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
src/targets/gpu/include/migraphx/gpu/relu.hpp

 #ifndef MIGRAPHX_GUARD_RTGLIB_RELU_HPP
 #define MIGRAPHX_GUARD_RTGLIB_RELU_HPP
-#include <migraphx/shape.hpp>
-#include <migraphx/gpu/miopen.hpp>
+#include <migraphx/gpu/oper.hpp>
+#include <migraphx/gpu/device/relu.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
...
@@ -10,24 +10,8 @@ namespace gpu {

 struct context;

-struct miopen_relu
+struct hip_relu : unary_device<hip_relu, device::relu>
 {
-    shared<activation_descriptor> ad;
-
-    template <class Self, class F>
-    static auto reflect(Self& self, F f)
-    {
-        return gpu::reflect(self.ad.get(), f);
-    }
-
-    std::string name() const { return "gpu::relu"; }
-    shape compute_shape(const std::vector<shape>& inputs) const;
-    argument compute(context& ctx,
-                     const shape& output_shape,
-                     const std::vector<argument>& args) const;
-    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };

 } // namespace gpu
...
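Both relu and tanh lose their MIOpen activation-descriptor boilerplate here: unary_device comes from migraphx/gpu/oper.hpp (now included by these headers) and evidently supplies the shared name/shape/compute plumbing, dispatching to the device function passed as the second template argument. That is why miopen_relu/miopen_tanh and the relu.cpp/tanh.cpp implementations below can be deleted. A rough, self-contained sketch of the CRTP idea (hypothetical names, not the actual oper.hpp code):

// Illustration only: a CRTP base parameterized on the per-element function,
// so each concrete op is an empty struct, as with unary_device<hip_relu, device::relu>.
#include <algorithm>
#include <cstddef>

using elementwise_fn = void (*)(float* out, const float* in, std::size_t n);

inline void relu_fn(float* out, const float* in, std::size_t n)
{
    for(std::size_t i = 0; i < n; i++)
        out[i] = std::max(0.0f, in[i]);
}

template <class Derived, elementwise_fn F>
struct unary_device_sketch
{
    // the shared plumbing lives in the base; derived ops add nothing
    void apply(float* out, const float* in, std::size_t n) const { F(out, in, n); }
};

struct relu_sketch : unary_device_sketch<relu_sketch, relu_fn>
{
};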
src/targets/gpu/include/migraphx/gpu/sign.hpp (new file, 0 → 100644)

#ifndef MIGRAPHX_GUARD_RTGLIB_SIGN_HPP
#define MIGRAPHX_GUARD_RTGLIB_SIGN_HPP

#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/sign.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct hip_sign : unary_device<hip_sign, device::sign>
{
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
src/targets/gpu/include/migraphx/gpu/tanh.hpp

 #ifndef MIGRAPHX_GUARD_RTGLIB_TANH_HPP
 #define MIGRAPHX_GUARD_RTGLIB_TANH_HPP
-#include <migraphx/shape.hpp>
-#include <migraphx/gpu/miopen.hpp>
+#include <migraphx/gpu/oper.hpp>
+#include <migraphx/gpu/device/tanh.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {

 struct context;

-struct miopen_tanh
+struct hip_tanh : unary_device<hip_tanh, device::tanh>
 {
-    shared<activation_descriptor> ad;
-
-    template <class Self, class F>
-    static auto reflect(Self& self, F f)
-    {
-        return gpu::reflect(self.ad.get(), f);
-    }
-
-    std::string name() const { return "gpu::tanh"; }
-    shape compute_shape(const std::vector<shape>& inputs) const;
-    argument compute(context& ctx,
-                     const shape& output_shape,
-                     const std::vector<argument>& args) const;
-    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };

 } // namespace gpu
...
src/targets/gpu/lowering.cpp

...
@@ -31,6 +31,7 @@
 #include <migraphx/gpu/erf.hpp>
 #include <migraphx/gpu/log.hpp>
 #include <migraphx/gpu/sin.hpp>
+#include <migraphx/gpu/sign.hpp>
 #include <migraphx/gpu/cos.hpp>
 #include <migraphx/gpu/tan.hpp>
 #include <migraphx/gpu/sinh.hpp>
...
@@ -82,10 +83,8 @@ struct miopen_apply
     void init()
     {
         this->last = instruction::get_output_alias(std::prev(prog->end()));
-        add_miopen_simple_op<miopen_relu>("relu", make_relu);
         add_miopen_simple_op<miopen_sigmoid>("sigmoid", make_sigmoid);
         add_miopen_simple_op<miopen_abs>("abs", make_abs);
-        add_miopen_simple_op<miopen_tanh>("tanh", make_tanh);
         add_miopen_extend_op<miopen_leaky_relu, op::leaky_relu>("leaky_relu", make_leaky_relu);
         add_miopen_extend_op<miopen_elu, op::elu>("elu", make_elu);
...
@@ -100,6 +99,7 @@ struct miopen_apply
         add_generic_op<hip_tan>("tan");
         add_generic_op<hip_sinh>("sinh");
         add_generic_op<hip_cosh>("cosh");
+        add_generic_op<hip_tanh>("tanh");
         add_generic_op<hip_asin>("asin");
         add_generic_op<hip_acos>("acos");
         add_generic_op<hip_atan>("atan");
...
@@ -111,6 +111,8 @@ struct miopen_apply
         add_generic_op<hip_rsqrt>("rsqrt");
         add_generic_op<hip_pow>("pow");
         add_generic_op<hip_sqdiff>("sqdiff");
+        add_generic_op<hip_relu>("relu");
+        add_generic_op<hip_sign>("sign");
         add_extend_op<miopen_gemm, op::dot>("dot");
         add_extend_op<miopen_contiguous, op::contiguous>("contiguous");
...
src/targets/gpu/relu.cpp (deleted, 100644 → 0)

#include <migraphx/gpu/relu.hpp>
#include <migraphx/gpu/context.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

shape miopen_relu::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).not_broadcasted();
    return inputs.at(1);
}

argument miopen_relu::compute(context& ctx,
                              const shape& output_shape,
                              const std::vector<argument>& args) const
{
    float alpha = 1;
    float beta  = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
                            ad.get(),
                            &alpha,
                            x_desc.get(),
                            args[0].implicit(),
                            &beta,
                            y_desc.get(),
                            args[1].implicit());
    return args[1];
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
src/targets/gpu/tanh.cpp (deleted, 100644 → 0)

#include <migraphx/gpu/tanh.hpp>
#include <migraphx/gpu/context.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

shape miopen_tanh::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).packed();
    return inputs.at(0);
}

argument miopen_tanh::compute(context& ctx,
                              const shape& output_shape,
                              const std::vector<argument>& args) const
{
    float alpha = 1;
    float beta  = 0;
    auto x_desc = make_tensor(args[0].get_shape());
    auto y_desc = make_tensor(output_shape);
    miopenActivationForward(ctx.get_stream().get_miopen(),
                            ad.get(),
                            &alpha,
                            x_desc.get(),
                            args[0].implicit(),
                            &beta,
                            y_desc.get(),
                            args[1].implicit());
    return args[1];
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
src/tf/tf.cpp

...
@@ -185,7 +185,7 @@ struct tf_parser
         add_mem_op("Pad", &tf_parser::parse_pad);
         add_mem_op("Reshape", &tf_parser::parse_reshape, false);
         add_mem_op("Slice", &tf_parser::parse_slice, false);
-        add_mem_op("Softmax", &tf_parser::parse_softmax<op::softmax>, false);
+        add_mem_op("Softmax", &tf_parser::parse_softmax<op::softmax>);
         add_mem_op("Squeeze", &tf_parser::parse_squeeze, false);
         add_mem_op("StridedSlice", &tf_parser::parse_stridedslice, false);
         add_mem_op("Transpose", &tf_parser::parse_transpose, false);
...
@@ -765,6 +765,29 @@ struct tf_parser
         }
     }

+    instruction_ref parse_slice(const std::string&,
+                                const attribute_map&,
+                                std::vector<instruction_ref> args)
+    {
+        op::slice op;
+        auto starts     = args[1]->eval().get<int32_t>().to_vector();
+        auto size       = args[2]->eval().get<int32_t>().to_vector();
+        auto axes       = args[0]->get_shape().lens();
+        size_t num_axes = axes.size();
+
+        op.starts = std::vector<int64_t>(starts.begin(), starts.end());
+        op.ends   = std::vector<int64_t>(num_axes);
+        op.axes   = std::vector<int64_t>(num_axes);
+        std::iota(op.axes.begin(), op.axes.end(), 0);
+
+        for(size_t i = 0; i < num_axes; i++)
+        {
+            if(size[i] == -1)
+                op.ends[i] = axes[i];
+            else
+                op.ends[i] = starts[i] + size[i];
+        }
+        return prog.add_instruction(op, make_contiguous(args[0]));
+    }
+
     // template to facilitate the logsoftmax later
     template <class Op>
     instruction_ref parse_softmax(const std::string&,
...
@@ -807,29 +830,6 @@ struct tf_parser
         return prog.add_instruction(op, make_contiguous(args[0]));
     }

-    instruction_ref parse_slice(const std::string&,
-                                const attribute_map&,
-                                std::vector<instruction_ref> args)
-    {
-        op::slice op;
-        auto starts     = args[1]->eval().get<int32_t>().to_vector();
-        auto size       = args[2]->eval().get<int32_t>().to_vector();
-        auto axes       = args[0]->get_shape().lens();
-        size_t num_axes = axes.size();
-
-        op.starts = std::vector<int64_t>(starts.begin(), starts.end());
-        op.ends   = std::vector<int64_t>(num_axes);
-        op.axes   = std::vector<int64_t>(num_axes);
-        std::iota(op.axes.begin(), op.axes.end(), 0);
-
-        for(size_t i = 0; i < num_axes; i++)
-        {
-            if(size[i] == -1)
-                op.ends[i] = axes[i];
-            else
-                op.ends[i] = starts[i] + size[i];
-        }
-        return prog.add_instruction(op, make_contiguous(args[0]));
-    }
-
     instruction_ref parse_stridedslice(const std::string&,
                                        const attribute_map& attributes,
                                        std::vector<instruction_ref> args)
...
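For clarity, TensorFlow's Slice op supplies (begin, size) per dimension rather than (start, end); parse_slice (moved above parse_softmax in this commit) converts that to op::slice by computing ends[i] = starts[i] + size[i], where size[i] == -1 means "run to the end of dimension i". A small worked example with hypothetical values:

// Standalone illustration of the ends computation in parse_slice.
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    // Hypothetical TF Slice inputs for a tensor of shape {2, 3}
    std::vector<int32_t> starts   = {0, 1};
    std::vector<int32_t> size     = {2, -1};
    std::vector<std::size_t> lens = {2, 3}; // args[0]->get_shape().lens()

    std::vector<int64_t> ends(lens.size());
    for(std::size_t i = 0; i < lens.size(); i++)
        ends[i] = (size[i] == -1) ? int64_t(lens[i]) : int64_t(starts[i]) + size[i];

    for(auto e : ends)
        std::cout << e << ' '; // prints: 2 3
    std::cout << '\n';
    return 0;
}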
test/cpu_ops_test.cpp

...
@@ -557,6 +557,21 @@ TEST_CASE(sqrt_test)
     EXPECT(migraphx::verify_range(results_vector, gold));
 }

+TEST_CASE(sign_test)
+{
+    migraphx::program p;
+    migraphx::shape s{migraphx::shape::float_type, {5}};
+    auto l = p.add_literal(
+        migraphx::literal{s, {1.02481645, 0.85643062, -0.03404123, -0.92791926, 0.0}});
+    p.add_instruction(migraphx::op::sign{}, l);
+    p.compile(migraphx::cpu::target{});
+    auto result = p.eval({});
+    std::vector<float> results_vector;
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold = {1.0, 1.0, -1.0, -1.0, 0.0};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 TEST_CASE(log_test)
 {
     migraphx::program p;
...
test/gpu/miopen.cpp

...
@@ -268,6 +268,18 @@ struct test_sqrt : verify_program<test_sqrt>
     }
 };

+struct test_sign : verify_program<test_sign>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        migraphx::shape s{migraphx::shape::double_type, {2, 3, 4, 6}};
+        auto param = p.add_parameter("x", s);
+        p.add_instruction(migraphx::op::sign{}, param);
+        return p;
+    }
+};
+
 struct test_log : verify_program<test_log>
 {
     migraphx::program create_program() const
...