gaoqiong / MIGraphX / Commits / a8eb886b

Commit a8eb886b, authored Oct 10, 2022 by Khalique Ahmed

    Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into develop

Parents: 7e604e9b, 4f3cc417
Changes: 251 files in total; this page shows 20 changed files with 1013 additions and 205 deletions (+1013 −205)
Files shown on this page:

- test/api/test_gpu.cpp (+60 −2)
- test/gpu/adjust_allocation.cpp (+5 −1)
- test/gpu/jit.cpp (+2 −0)
- test/gpu/make_precompile_op.hpp (+66 −0)
- test/gpu/mlir.cpp (+0 −4)
- test/gpu/pack_int8_args.cpp (+83 −74)
- test/gpu/stream_sync.cpp (+146 −0)
- test/memory_coloring_test.cpp (+1 −1)
- test/onnx/batch_norm_1d_test.onnx (+35 −0)
- test/onnx/batch_norm_2d_test.onnx (+35 −0)
- test/onnx/batch_norm_3d_test.onnx (+44 −0)
- test/onnx/batch_norm_flat_test.onnx (+34 −0)
- test/onnx/batch_norm_invalid_bias_rank_test.onnx (+35 −0)
- test/onnx/batch_norm_invalid_rank_test.onnx (+31 −0)
- test/onnx/batchnorm_1d_test.onnx (+0 −35)
- test/onnx/batchnorm_3d_test.onnx (+0 −39)
- test/onnx/gen_onnx.py (+96 −27)
- test/onnx/onnx_test.cpp (+148 −22)
- test/onnx/verify_onnx.cpp (+191 −0)
- test/py/CMakeLists.txt (+1 −0)
test/api/test_gpu.cpp (view file @ a8eb886b)

```diff
@@ -25,6 +25,8 @@
 #include <hip/hip_runtime_api.h>
 #include <migraphx/migraphx.h>
 #include <migraphx/migraphx.hpp>
+#include <migraphx/manage_ptr.hpp>
 #include "test.hpp"

 TEST_CASE(load_and_run)
@@ -44,11 +46,67 @@ TEST_CASE(load_and_run)
     {
         pp.add(name, migraphx::argument::generate(param_shapes[name]));
     }
     auto outputs = p.eval(pp);
     CHECK(shapes_before.size() == outputs.size());
     CHECK(bool{shapes_before.front() == outputs.front().get_shape()});
 }

+using hip_ptr    = MIGRAPHX_MANAGE_PTR(void, hipFree);
+using stream_ptr = MIGRAPHX_MANAGE_PTR(hipStream_t, hipStreamDestroy);
+
+stream_ptr get_stream()
+{
+    hipStream_t stream;
+    auto err = hipStreamCreateWithFlags(&stream, 0);
+    EXPECT(err == hipSuccess);
+    return stream_ptr{stream};
+}
+
+hip_ptr get_hip_buffer(size_t size)
+{
+    void* ptr;
+    auto err = hipMalloc(&ptr, size);
+    EXPECT(err == hipSuccess);
+    return hip_ptr{ptr};
+}
+
+TEST_CASE(load_and_run_async)
+{
+    auto p             = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
+    auto shapes_before = p.get_output_shapes();
+    migraphx::compile_options options;
+    options.set_offload_copy(false);
+    p.compile(migraphx::target("gpu"), options);
+    auto shapes_after = p.get_output_shapes();
+    CHECK(shapes_before.size() == 1);
+    CHECK(shapes_before.size() == shapes_after.size());
+    CHECK(bool{shapes_before.front() == shapes_after.front()});
+    migraphx::program_parameters pp;
+    auto param_shapes = p.get_parameter_shapes();
+    stream_ptr stream = get_stream();
+    std::vector<hip_ptr> buffs;
+    std::vector<migraphx::argument> args;
+    for(auto&& name : param_shapes.names())
+    {
+        args.push_back(migraphx::argument::generate(param_shapes[name]));
+        buffs.push_back(get_hip_buffer(args.rbegin()->get_shape().bytes()));
+        auto err = hipMemcpy(buffs.rbegin()->get(),
+                             args.rbegin()->data(),
+                             args.rbegin()->get_shape().bytes(),
+                             hipMemcpyHostToDevice);
+        EXPECT(err == hipSuccess);
+        pp.add(name, migraphx::argument(args.rbegin()->get_shape(), buffs.rbegin()->get()));
+    }
+    auto outputs = p.run_async(pp, stream.get());
+    CHECK(shapes_before.size() == outputs.size());
+    CHECK(bool{shapes_before.front() == outputs.front().get_shape()});
+}
+
 TEST_CASE(load_and_run_ctx)
 {
     auto p = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
@@ -82,10 +140,10 @@ TEST_CASE(if_pl_test)
     migraphx::program_parameters pp;
     auto param_shapes = p.get_parameter_shapes();
     auto xs           = param_shapes["x"];
-    std::vector<float> xd(xs.bytes() / sizeof(float), 1.0);
+    std::vector<float> xd(xs.elements(), 1.0);
     pp.add("x", migraphx::argument(xs, xd.data()));
     auto ys = param_shapes["y"];
-    std::vector<float> yd(ys.bytes() / sizeof(float), 2.0);
+    std::vector<float> yd(ys.elements(), 2.0);
     pp.add("y", migraphx::argument(ys, yd.data()));
     char ccond = cond;
     pp.add("cond", migraphx::argument(param_shapes["cond"], &ccond));
```
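The new `load_and_run_async` test exercises `migraphx::program::run_async` with caller-managed device buffers and a caller-owned HIP stream. A minimal sketch of the same pattern in isolation follows; the model name, the single parameter name "x", and the pre-filled device pointer `dbuf` are illustrative assumptions, not part of the diff:

```cpp
// Sketch: run a compiled MIGraphX program asynchronously on our own HIP stream.
// Assumes "model.onnx" has one parameter "x" whose data already lives at dbuf
// in device memory (e.g. copied there with hipMemcpy beforehand).
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.hpp>

void run_on_stream(void* dbuf)
{
    auto p = migraphx::parse_onnx("model.onnx");
    migraphx::compile_options options;
    options.set_offload_copy(false); // caller manages host<->device copies
    p.compile(migraphx::target("gpu"), options);

    migraphx::program_parameters pp;
    auto param_shapes = p.get_parameter_shapes();
    pp.add("x", migraphx::argument(param_shapes["x"], dbuf)); // wrap device pointer

    hipStream_t stream;
    (void)hipStreamCreateWithFlags(&stream, 0);
    auto outputs = p.run_async(pp, stream); // enqueues work; does not block the host
    (void)hipStreamSynchronize(stream);     // wait before touching the outputs
    (void)hipStreamDestroy(stream);
}
```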
test/gpu/adjust_allocation.cpp (view file @ a8eb886b)

```diff
@@ -40,6 +40,10 @@
 #include <migraphx/make_op.hpp>
 #include <basic_ops.hpp>
 #include <test.hpp>
+#include "make_precompile_op.hpp"
+
+// Treat some operators as compilable to enable lowering
+MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")

 void run_lowering(migraphx::program& p, bool offload_copy = false)
 {
@@ -118,7 +122,7 @@ TEST_CASE(no_copy_dead_param)
     auto xb  = mm->add_instruction(
         migraphx::make_op("hip::allocate", {{"shape", to_value(s)}}));
     auto gx  = mm->add_instruction(migraphx::make_op("hip::copy_to_gpu"), x, xb);
     auto ab  = mm->add_instruction(
         migraphx::make_op("hip::allocate", {{"shape", to_value(s)}}));
-    auto sum = mm->add_instruction(migraphx::make_op("gpu::add"), gx, gx, ab);
+    auto sum = mm->add_instruction(make_precompile_op("add"), gx, gx, ab);
     auto r   = mm->add_instruction(migraphx::make_op("hip::copy_from_gpu"), sum);
     mm->add_return({r});
```
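For orientation, the two helpers this test now pulls from make_precompile_op.hpp (shown in full later in this diff) are used together: the macro registers a stub compiler whose `names()` lists the operators, which appears to let lowering accept them without real fused device kernels, while `make_precompile_op` builds the matching expected IR. A minimal sketch, assuming the header is on the include path:

```cpp
// Sketch: combining the helpers from make_precompile_op.hpp in a test TU.
#include "make_precompile_op.hpp"

// Registers a stub gpu compiler for these op names; lowering can then emit
// gpu::precompile_op for them in the module under test.
MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")

// Builds the operation the lowered module is expected to contain, i.e.
// gpu::precompile_op wrapping make_op("add").
auto expected_add = make_precompile_op("add");
```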
test/gpu/jit.cpp (view file @ a8eb886b)

```diff
@@ -307,12 +307,14 @@ TEST_CASE(compile_math)
     "erf(x)",
     "exp(x)",
     "floor(x)",
+    "fmod(x, x)",
     "isnan(x)",
     "log(x)",
     "max(x, x)",
     "min(x, x)",
     "pow(x, 0)",
     "pow(x, x)",
+    "remainder(x,x)",
     "round(x)",
     "rsqrt(x)",
     "sin(x)",
```
src/targets/gpu/device/gelu.cpp → test/gpu/make_precompile_op.hpp (view file @ a8eb886b)

```diff
@@ -21,63 +21,46 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
-#include <migraphx/gpu/device/gelu.hpp>
-#include <migraphx/gpu/device/nary.hpp>
-#include <migraphx/gpu/device/types.hpp>
-#include <cmath>
+#ifndef MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
+#define MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP

-namespace migraphx {
-inline namespace MIGRAPHX_INLINE_NS {
-namespace gpu {
-namespace device {
+#include <migraphx/operation.hpp>
+#include <migraphx/gpu/compiler.hpp>
+#include <migraphx/make_op.hpp>

-// x * 0.5 * (1.0 + erf(x / sqrt(2.0)))
-template <class T>
-auto gelu_fn(T x) __device__
-{
-    return x * 0.5 * (1 + ::erf(x * M_SQRT1_2));
-}
+// NOLINTNEXTLINE
+#define MIGRAPHX_GPU_TEST_PRECOMPILE(...)                                 \
+    struct test_compiler : migraphx::gpu::compiler<test_compiler>        \
+    {                                                                     \
+        std::vector<std::string> names() const { return {__VA_ARGS__}; } \
+                                                                          \
+        template <class... Ts>                                            \
+        migraphx::operation compile_op(Ts&&...) const                     \
+        {                                                                 \
+            MIGRAPHX_THROW("Not compilable");                             \
+        }                                                                 \
+                                                                          \
+        template <class... Ts>                                            \
+        migraphx::gpu::compiler_replace compile(Ts&&...) const            \
+        {                                                                 \
+            MIGRAPHX_THROW("Not compilable");                             \
+        }                                                                 \
+    };

-// 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * pow(x, 3))))
-template <class T>
-auto gelu_fn_new(T x) __device__
-{
-    return 0.5 * x * (1 + tanh(sqrt(M_2_PI) * (x + 0.044715 * x * x * x)));
-}
+inline migraphx::operation make_precompile_op(migraphx::rank<0>, const migraphx::operation& op)
+{
+    return migraphx::make_op("gpu::precompile_op", {{"op", migraphx::to_value(op)}});
+}

-void gelu(hipStream_t stream, const argument& result, const argument& arg)
-{
-    nary(stream, result, arg)([](auto x) __device__ { return gelu_fn(to_hip_type(x)); });
-}
+inline migraphx::operation make_precompile_op(migraphx::rank<1>, const std::string& name)
+{
+    return make_precompile_op(migraphx::rank<0>{}, migraphx::make_op(name));
+}

-void gelu_new(hipStream_t stream, const argument& result, const argument& arg)
-{
-    nary(stream, result, arg)([](auto x) __device__ { return gelu_fn_new(to_hip_type(x)); });
-}
-
-void add_gelu(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2)
-{
-    nary(stream, result, arg1, arg2)([](auto x, auto y) __device__ {
-        auto sum = to_hip_type(x + y);
-        return gelu_fn(sum);
-    });
-}
-
-void add_gelu_new(
-    hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2)
-{
-    nary(stream, result, arg1, arg2)([](auto x, auto y) __device__ {
-        auto sum = to_hip_type(x + y);
-        return gelu_fn(sum);
-    });
-}
+template <class T>
+auto make_precompile_op(const T& x)
+{
+    return make_precompile_op(migraphx::rank<1>{}, x);
+}

-} // namespace device
-} // namespace gpu
-} // namespace MIGRAPHX_INLINE_NS
-} // namespace migraphx
+#endif // MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
```
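The header overloads `make_precompile_op` on `migraphx::rank<N>` so that a string name and an already-built operation take different paths behind one public entry point. A minimal, library-free sketch of this rank-based tag dispatch (the names here are illustrative, not MIGraphX API):

```cpp
#include <iostream>
#include <string>

// rank<1> derives from rank<0>, so an overload taking rank<1> wins overload
// resolution when it is viable; otherwise the call decays to the rank<0> base.
template <int N>
struct rank : rank<N - 1>
{
};
template <>
struct rank<0>
{
};

// Preferred path: the argument is a name.
std::string describe(rank<1>, const std::string& name) { return "name: " + name; }

// Fallback path: anything else is treated as an operation object.
template <class T>
std::string describe(rank<0>, const T&) { return "operation object"; }

template <class T>
std::string describe(const T& x)
{
    // Start at the highest rank and let overload resolution fall through.
    return describe(rank<1>{}, x);
}

int main()
{
    std::cout << describe(std::string("add")) << '\n'; // name: add
    std::cout << describe(42) << '\n';                 // operation object
}
```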
test/gpu/mlir.cpp (view file @ a8eb886b)

```diff
@@ -37,10 +37,6 @@
 #include <migraphx/functional.hpp>
 #include <test.hpp>

-using migraphx::trim;
-
-// m test_gpu_mlir && ./bin/test_gpu_mlir
-
 struct mlir_gpu_target : migraphx::gpu::target
 {
     std::string name() const { return "mlir"; }
```
test/gpu/pack_int8_args.cpp (view file @ a8eb886b)

```diff
@@ -30,6 +30,7 @@
 #include <migraphx/adjust_allocation.hpp>
 #include <migraphx/gpu/pack_int8_args.hpp>
 #include <migraphx/gpu/rocblas.hpp>
+#include <migraphx/gpu/device_name.hpp>
 #include <migraphx/auto_contiguous.hpp>
 #include <migraphx/dead_code_elimination.hpp>
 #include <migraphx/replace_allocate.hpp>
@@ -38,10 +39,13 @@
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/make_op.hpp>
 #include <test.hpp>
+#include "make_precompile_op.hpp"
+
+// Treat some operators as compilable to enable lowering
+MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")

-void run_passes(migraphx::module& m)
+void run_passes(migraphx::module& m, migraphx::gpu::context& ctx)
 {
-    auto ctx = migraphx::gpu::context{};
     migraphx::run_passes(
         m,
         {migraphx::auto_contiguous{},
          migraphx::gpu::lowering{&ctx, false},
@@ -52,18 +56,6 @@ void run_passes(migraphx::module& m)
          migraphx::dead_code_elimination{}});
 }

-bool get_int8_x4_format()
-{
-    bool int8_x4_format = true;
-#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
-    auto ctx = migraphx::gpu::context{};
-    rocblas_gemm_flags flag;
-    rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
-    int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
-#endif
-    return int8_x4_format;
-}
-
 TEST_CASE(quant_dot)
 {
     auto create_module = [] {
@@ -102,11 +94,13 @@ TEST_CASE(quant_dot)
             migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(m2_shape)}}));
         packa = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), l2, alloc);
     }
-    auto gemm = m.add_instruction(
-        migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
-        l1,
-        packa,
-        gemm_alloc);
+    auto gemm = m.add_instruction(
+        migraphx::make_op("gpu::quant_gemm",
+                          {{"int8_x4_format", int8_x4},
+                           {"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
+        l1,
+        packa,
+        gemm_alloc);
     auto beta_broadcast = m.add_instruction(
         migraphx::make_op("multibroadcast", {{"out_lens", m3_shape.lens()}}), beta);
@@ -116,19 +110,19 @@ TEST_CASE(quant_dot)
         m.add_instruction(migraphx::make_op("gpu::contiguous"), beta_broadcast, beta_alloc);
     auto mul_alloc = m.add_instruction(
         migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(m3_shape)}}));
-    auto m3_beta  = m.add_instruction(migraphx::make_op("gpu::mul"), l3, beta_contiguous, mul_alloc);
-    auto gemm_add = m.add_instruction(migraphx::make_op("gpu::add"), gemm, m3_beta, output);
+    auto m3_beta  = m.add_instruction(make_precompile_op("mul"), l3, beta_contiguous, mul_alloc);
+    auto gemm_add = m.add_instruction(make_precompile_op("add"), gemm, m3_beta, output);
     m.add_return({gemm_add});
     return m;
     };

-    auto m1 = create_module();
-    run_passes(m1);
-    bool flag = get_int8_x4_format();
-    auto m2   = create_optimized_int8_x4(flag);
+    auto m1  = create_module();
+    auto ctx = migraphx::gpu::context{};
+    run_passes(m1, ctx);
+    bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
+    auto m2      = create_optimized_int8_x4(int8_x4);
     EXPECT(m1 == m2);
 }
@@ -187,21 +181,23 @@ TEST_CASE(quant_dot_trans)
     // back result to int8
     auto tl1_convert_alloc = m.add_instruction(migraphx::make_op(
         "hip::allocate", {{"shape", migraphx::to_value(alpha_contiguous->get_shape())}}));
-    auto tl1_convert = m.add_instruction(
-        migraphx::make_op("gpu::convert", {{"target_type", alpha->get_shape().type()}}),
-        conta,
-        tl1_convert_alloc);
+    auto tl1_convert = m.add_instruction(
+        make_precompile_op(
+            migraphx::make_op("convert", {{"target_type", alpha->get_shape().type()}})),
+        conta,
+        tl1_convert_alloc);
     auto mul_alloc = m.add_instruction(migraphx::make_op(
         "hip::allocate", {{"shape", migraphx::to_value(tl1_convert->get_shape())}}));
-    auto tl1_alpha_int32 =
-        m.add_instruction(migraphx::make_op("gpu::mul"), alpha_contiguous, tl1_convert, mul_alloc);
+    auto tl1_alpha_int32 =
+        m.add_instruction(make_precompile_op("mul"), alpha_contiguous, tl1_convert, mul_alloc);
     // convert mul_res to int8
     auto tl1_alpha_int8_alloc = m.add_instruction(
         migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(conta->get_shape())}}));
-    auto tl1_alpha_int8 = m.add_instruction(
-        migraphx::make_op("gpu::convert", {{"target_type", conta->get_shape().type()}}),
-        tl1_alpha_int32,
-        tl1_alpha_int8_alloc);
+    auto tl1_alpha_int8 = m.add_instruction(
+        make_precompile_op(
+            migraphx::make_op("convert", {{"target_type", conta->get_shape().type()}})),
+        tl1_alpha_int32,
+        tl1_alpha_int8_alloc);
     auto packb = contb;
     if(int8_x4)
@@ -211,21 +207,24 @@ TEST_CASE(quant_dot_trans)
         packb = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), contb, allocpb);
     }
-    auto gemm = m.add_instruction(
-        migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
-        tl1_alpha_int8,
-        packb,
-        output);
+    auto gemm = m.add_instruction(
+        migraphx::make_op("gpu::quant_gemm",
+                          {{"int8_x4_format", int8_x4},
+                           {"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
+        tl1_alpha_int8,
+        packb,
+        output);
     m.add_return({gemm});
     return m;
     };

-    auto m1   = create_module();
-    bool flag = get_int8_x4_format();
-    auto m2   = create_optimized_int8_x4(flag);
-    run_passes(m1);
+    auto m1  = create_module();
+    auto ctx = migraphx::gpu::context{};
+    run_passes(m1, ctx);
+    bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
+    auto m2      = create_optimized_int8_x4(int8_x4);
     EXPECT(m1 == m2);
 }
@@ -292,11 +291,13 @@ TEST_CASE(quant_dot_pad)
         packa = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), pl2, alloc);
     }
-    auto gemm = m.add_instruction(
-        migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
-        pl1,
-        packa,
-        gemm_alloc);
+    auto gemm = m.add_instruction(
+        migraphx::make_op("gpu::quant_gemm",
+                          {{"int8_x4_format", int8_x4},
+                           {"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
+        pl1,
+        packa,
+        gemm_alloc);
     auto beta_broadcast = m.add_instruction(
         migraphx::make_op("multibroadcast", {{"out_lens", s3.lens()}}), beta);
@@ -306,18 +307,18 @@ TEST_CASE(quant_dot_pad)
         m.add_instruction(migraphx::make_op("gpu::contiguous"), beta_broadcast, beta_alloc);
     auto mul_alloc = m.add_instruction(
         migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(s3)}}));
-    auto m3_beta  = m.add_instruction(migraphx::make_op("gpu::mul"), l3, beta_contiguous, mul_alloc);
-    auto gemm_add = m.add_instruction(migraphx::make_op("gpu::add"), gemm, m3_beta, output);
+    auto m3_beta  = m.add_instruction(make_precompile_op("mul"), l3, beta_contiguous, mul_alloc);
+    auto gemm_add = m.add_instruction(make_precompile_op("add"), gemm, m3_beta, output);
     m.add_return({gemm_add});
     return m;
     };

-    auto m1   = create_module();
-    bool flag = get_int8_x4_format();
-    auto m2   = create_optimized_int8_x4(flag);
-    run_passes(m1);
+    auto m1  = create_module();
+    auto ctx = migraphx::gpu::context{};
+    run_passes(m1, ctx);
+    bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
+    auto m2      = create_optimized_int8_x4(int8_x4);
     EXPECT(m1 == m2);
 }
@@ -396,14 +397,15 @@ TEST_CASE(quant_dot_trans_pad)
     // back result to int8
     auto tl1_convert_alloc = m.add_instruction(migraphx::make_op(
         "hip::allocate", {{"shape", migraphx::to_value(alpha_contiguous->get_shape())}}));
-    auto tl1_convert = m.add_instruction(
-        migraphx::make_op("gpu::convert", {{"target_type", alpha->get_shape().type()}}),
-        conta,
-        tl1_convert_alloc);
+    auto tl1_convert = m.add_instruction(
+        make_precompile_op(
+            migraphx::make_op("convert", {{"target_type", alpha->get_shape().type()}})),
+        conta,
+        tl1_convert_alloc);
     auto mul_alloc = m.add_instruction(migraphx::make_op(
         "hip::allocate", {{"shape", migraphx::to_value(tl1_convert->get_shape())}}));
-    auto tl1_alpha_int32 =
-        m.add_instruction(migraphx::make_op("gpu::mul"), alpha_contiguous, tl1_convert, mul_alloc);
+    auto tl1_alpha_int32 =
+        m.add_instruction(make_precompile_op("mul"), alpha_contiguous, tl1_convert, mul_alloc);
     // convert mul_res to int8
     auto tl1_alpha_int8_alloc = m.add_instruction(
         migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(conta->get_shape())}}));
@@ -415,10 +417,11 @@ TEST_CASE(quant_dot_trans_pad)
             migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps1)}}));
     }
-    auto tl1_alpha_int8 = m.add_instruction(
-        migraphx::make_op("gpu::convert", {{"target_type", conta->get_shape().type()}}),
-        tl1_alpha_int32,
-        tl1_alpha_int8_alloc);
+    auto tl1_alpha_int8 = m.add_instruction(
+        make_precompile_op(
+            migraphx::make_op("convert", {{"target_type", conta->get_shape().type()}})),
+        tl1_alpha_int32,
+        tl1_alpha_int8_alloc);
     auto pa = tl1_alpha_int8;
     if(int8_x4)
@@ -438,17 +441,23 @@ TEST_CASE(quant_dot_trans_pad)
     }
-    auto gemm = m.add_instruction(
-        migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
-        pa,
-        packb,
-        output);
+    auto gemm = m.add_instruction(
+        migraphx::make_op("gpu::quant_gemm",
+                          {{"int8_x4_format", int8_x4},
+                           {"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
+        pa,
+        packb,
+        output);
     m.add_return({gemm});
     return m;
     };

-    auto m1   = create_module();
-    bool flag = get_int8_x4_format();
-    auto m2   = create_optimized_int8_x4(flag);
-    run_passes(m1);
+    auto m1  = create_module();
+    auto ctx = migraphx::gpu::context{};
+    run_passes(m1, ctx);
+    bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
+    auto m2      = create_optimized_int8_x4(int8_x4);
     EXPECT(m1 == m2);
 }
```
test/gpu/stream_sync.cpp (new file, mode 100644, view file @ a8eb886b)

```cpp
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <iostream>
#include <vector>
#include <migraphx/gpu/context.hpp>
#include <migraphx/context.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/par_for.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/module.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/gpu/target.hpp>
#include "test.hpp"

using hip_stream_ptr = MIGRAPHX_MANAGE_PTR(hipStream_t, hipStreamDestroy);

constexpr uint32_t stream_sync_test_val = 1337;

// NOLINTNEXTLINE
const std::string compare_numbers = R"__migraphx__(
#include <hip/hip_runtime.h>

extern "C" {
__global__ void compare(float* data)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (data[i] != 1337)
    {
        abort();
    }
}
}

int main() {}
)__migraphx__";

migraphx::src_file make_src_file(const std::string& name, const std::string& content)
{
    return {name, std::make_pair(content.data(), content.data() + content.size())};
}

hip_stream_ptr get_stream()
{
    hipStream_t stream;
    auto status = hipStreamCreate(&stream);
    if(status != hipSuccess)
    {
        MIGRAPHX_THROW("Failed to get stream");
    }
    return hip_stream_ptr{stream};
}

TEST_CASE(test_stream_sync_compare_kernel)
{
    auto binaries = migraphx::gpu::compile_hip_src(
        {make_src_file("check_stuff.cpp", compare_numbers)}, "", migraphx::gpu::get_device_name());
    EXPECT(binaries.size() == 1);

    migraphx::gpu::kernel k1{binaries.front(), "compare"};

    auto input =
        migraphx::fill_argument({migraphx::shape::float_type, {128}}, stream_sync_test_val);
    auto ginput            = migraphx::gpu::to_gpu(input);
    hip_stream_ptr pstream = get_stream();
    k1.launch(pstream.get(), input.get_shape().elements(), 1024)(ginput.cast<float>());
    auto output = migraphx::gpu::from_gpu(ginput);

    EXPECT(output == input);
}

TEST_CASE(test_stream_sync)
{
    auto binaries = migraphx::gpu::compile_hip_src(
        {make_src_file("check_stuff.cpp", compare_numbers)}, "", migraphx::gpu::get_device_name());
    EXPECT(binaries.size() == 1);

    migraphx::gpu::kernel k1{binaries.front(), "compare"};

    const unsigned int m = 128;
    const unsigned int k = 8192;

    // Setup empty GPU memory buffer
    migraphx::shape input_shape{migraphx::shape::float_type, {m, k}};
    migraphx::shape output_shape{migraphx::shape::float_type, {m, m}};
    auto input             = migraphx::fill_argument(input_shape, 0);
    auto ginput            = migraphx::gpu::to_gpu(input);
    auto output            = migraphx::fill_argument(output_shape, 0);
    auto goutput           = migraphx::gpu::to_gpu(output);
    hip_stream_ptr pstream = get_stream();

    migraphx::program p;
    auto* mm = p.get_main_module();
    auto x   = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {m, k}});
    auto y   = mm->add_literal(
        migraphx::generate_literal(migraphx::shape{migraphx::shape::float_type, {k, m}}));
    std::vector<float> data(m * m, stream_sync_test_val);
    auto test_val = mm->add_literal(output_shape, data);
    auto mult_out = mm->add_instruction(migraphx::make_op("dot"), x, y);
    mm->add_instruction(migraphx::make_op("add"), mult_out, test_val);
    p.compile(migraphx::gpu::target{});

    // Run network and then verify with kernel
    auto args = p.eval({{"x", ginput}, {"output", goutput}}, {pstream.get(), true});
    k1.launch(pstream.get(), m * m, 1024)(goutput.cast<float>());
    output = migraphx::gpu::from_gpu(goutput);

    EXPECT(output != input);
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
```
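The structure of `test_stream_sync` suggests it leans on HIP's stream-ordering guarantee: work enqueued on a single stream executes in submission order, so launching the compare kernel on `pstream` right after the program is evaluated on that same stream lets the kernel observe the finished network output without an intervening host-side synchronization. A minimal HIP sketch of that ordering guarantee (illustrative kernels, not from the diff):

```cpp
#include <hip/hip_runtime.h>

__global__ void produce(float* data) { data[threadIdx.x] = 1337.0f; }

__global__ void check(float* data)
{
    // Never fires: same-stream kernels run in submission order.
    if(data[threadIdx.x] != 1337.0f)
        abort();
}

int main()
{
    float* d = nullptr;
    hipStream_t s;
    (void)hipStreamCreate(&s);
    (void)hipMalloc(&d, 64 * sizeof(float));
    hipLaunchKernelGGL(produce, dim3(1), dim3(64), 0, s, d);
    hipLaunchKernelGGL(check, dim3(1), dim3(64), 0, s, d); // ordered after produce
    (void)hipStreamSynchronize(s);
    (void)hipFree(d);
    (void)hipStreamDestroy(s);
}
```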
test/memory_coloring_test.cpp (view file @ a8eb886b)

```diff
@@ -724,7 +724,7 @@ TEST_CASE(test39)
     auto sub_modules = p.get_modules();
     std::reverse(sub_modules.begin(), sub_modules.end());
-    for(auto& smod : sub_modules)
+    for(const auto& smod : sub_modules)
     {
         run_pass(*smod);
     }
```
Binary test fixtures (ONNX protobufs; the raw bytes do not render as text, but the embedded tensor names are recoverable):

- test/onnx/batch_norm_1d_test.onnx: new file (100644), BatchNormalization graph over inputs x, scale, bias, mean, variance producing y
- test/onnx/batch_norm_2d_test.onnx: new file (100644), same operator and input names
- test/onnx/batch_norm_3d_test.onnx: new file (100644), same operator with an epsilon attribute
- test/onnx/batch_norm_flat_test.onnx: new file (100644), same operator with an epsilon attribute
- test/onnx/batch_norm_invalid_bias_rank_test.onnx: new file (100644), deliberately malformed bias rank
- test/onnx/batch_norm_invalid_rank_test.onnx: new file (100644), deliberately malformed input rank
- test/onnx/batchnorm_1d_test.onnx: deleted (100644 → 0), old fixture with inputs named 0 through 4 producing 5, epsilon and momentum attributes
- test/onnx/batchnorm_3d_test.onnx: deleted (100644 → 0), old fixture with the same 0 through 4 naming scheme
test/onnx/gen_onnx.py (view file @ a8eb886b)

```diff
@@ -314,38 +314,107 @@ def averagepool_same_upper_test():
 @onnx_test
-def batchnorm_1d_test():
-    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
-    scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
-    bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
-    mean = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
-    var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
-    out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 3, 5])
-    node = onnx.helper.make_node('BatchNormalization',
-                                 inputs=['0', '1', '2', '3', '4'],
-                                 outputs=['5'],
-                                 epsilon=1e-6,
-                                 momentum=0.9)
+def batch_norm_flat_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [1])
+    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [1])
+    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1])
+    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [1])
+    out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
+    node = onnx.helper.make_node('BatchNormalization',
+                                 inputs=['x', 'scale', 'bias', 'mean', 'variance'],
+                                 outputs=['y'],
+                                 epsilon=1e-6)
 
     return ([node], [x, scale, bias, mean, var], [out])
 
 
 @onnx_test
-def batchnorm_3d_test():
-    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5, 5])
-    scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
-    bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
-    mean = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
-    var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
-    out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 3, 5, 5, 5])
-    node = onnx.helper.make_node('BatchNormalization',
-                                 inputs=['0', '1', '2', '3', '4'],
-                                 outputs=['5'],
-                                 epsilon=1e-6,
-                                 momentum=0.9)
+def batch_norm_1d_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [2, 3, 4])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
+    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
+    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
+    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
+    out = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [2, 3, 4])
+    node = onnx.helper.make_node('BatchNormalization',
+                                 inputs=['x', 'scale', 'bias', 'mean', 'variance'],
+                                 outputs=['y'])
 
     return ([node], [x, scale, bias, mean, var], [out])
 
 
+@onnx_test
+def batch_norm_2d_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
+    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
+    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
+    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
+    out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 4])
+    node = onnx.helper.make_node('BatchNormalization',
+                                 inputs=['x', 'scale', 'bias', 'mean', 'variance'],
+                                 outputs=['y'])
+
+    return ([node], [x, scale, bias, mean, var], [out])
+
+
+@onnx_test
+def batch_norm_3d_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [2, 2, 2, 2, 2])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT16, [2])
+    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT16, [2])
+    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT16, [2])
+    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT16, [2])
+    out = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [2, 2, 2, 2, 2])
+    node = onnx.helper.make_node('BatchNormalization',
+                                 inputs=['x', 'scale', 'bias', 'mean', 'variance'],
+                                 outputs=['y'],
+                                 epsilon=1e-6)
+
+    return ([node], [x, scale, bias, mean, var], [out])
+
+
+@onnx_test
+def batch_norm_invalid_rank_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [8, 8])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [8])
+    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [8])
+    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [8])
+    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [8])
+    out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8, 8])
+    node = onnx.helper.make_node('BatchNormalization',
+                                 inputs=['x', 'scale', 'bias', 'mean', 'variance'],
+                                 outputs=['y'])
+
+    return ([node], [x, scale, bias, mean, var], [out])
+
+
+@onnx_test
+def batch_norm_invalid_bias_rank_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
+    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3, 1])
+    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
+    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
+    out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 4])
+    node = onnx.helper.make_node('BatchNormalization',
+                                 inputs=['x', 'scale', 'bias', 'mean', 'variance'],
+                                 outputs=['y'])
+
+    return ([node], [x, scale, bias, mean, var], [out])
```
test/onnx/onnx_test.cpp (view file @ a8eb886b)

```diff
@@ -369,36 +369,134 @@ TEST_CASE(averagepool_same_upper_test)
     EXPECT(p == prog);
 }

-TEST_CASE(batchnorm_1d_test)
+TEST_CASE(batch_norm_flat_test)
 {
     migraphx::program p;
     auto* mm = p.get_main_module();
-    auto l0  = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5}});
-    auto l1  = mm->add_parameter("1", {migraphx::shape::float_type, {3}});
-    auto l2  = mm->add_parameter("2", {migraphx::shape::float_type, {3}});
-    auto l3  = mm->add_parameter("3", {migraphx::shape::float_type, {3}});
-    auto l4  = mm->add_parameter("4", {migraphx::shape::float_type, {3}});
-    mm->add_instruction(migraphx::make_op("batch_norm_inference"), l0, l1, l2, l3, l4);
-    auto prog = optimize_onnx("batchnorm_1d_test.onnx");
+    auto x     = mm->add_parameter("x", {migraphx::shape::float_type, {10}});
+    auto scale = mm->add_parameter("scale", {migraphx::shape::float_type, {1}});
+    auto bias  = mm->add_parameter("bias", {migraphx::shape::float_type, {1}});
+    auto mean  = mm->add_parameter("mean", {migraphx::shape::float_type, {1}});
+    auto var   = mm->add_parameter("variance", {migraphx::shape::float_type, {1}});
+    auto rt    = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+    auto eps   = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
+    auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
+    auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
+    auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+    auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+    auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, scale});
+    add_common_op(*mm, migraphx::make_op("add"), {r0, bias});
+    auto prog = optimize_onnx("batch_norm_flat_test.onnx");

     EXPECT(p == prog);
 }

-TEST_CASE(batchnorm_3d_test)
+TEST_CASE(batch_norm_1d_test)
 {
     migraphx::program p;
     auto* mm = p.get_main_module();
-    auto l0  = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5, 5}});
-    auto l1  = mm->add_parameter("1", {migraphx::shape::float_type, {3}});
-    auto l2  = mm->add_parameter("2", {migraphx::shape::float_type, {3}});
-    auto l3  = mm->add_parameter("3", {migraphx::shape::float_type, {3}});
-    auto l4  = mm->add_parameter("4", {migraphx::shape::float_type, {3}});
-    mm->add_instruction(migraphx::make_op("batch_norm_inference"), l0, l1, l2, l3, l4);
-    auto prog = optimize_onnx("batchnorm_3d_test.onnx");
+    auto x     = mm->add_parameter("x", {migraphx::shape::half_type, {2, 3, 4}});
+    auto scale = mm->add_parameter("scale", {migraphx::shape::float_type, {3}});
+    auto bias  = mm->add_parameter("bias", {migraphx::shape::float_type, {3}});
+    auto mean  = mm->add_parameter("mean", {migraphx::shape::float_type, {3}});
+    auto var   = mm->add_parameter("variance", {migraphx::shape::float_type, {3}});
+    auto rt    = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
+    auto eps   = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-5f}});
+    auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), scale);
+    auto usq_bias  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), bias);
+    auto usq_mean  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), mean);
+    auto usq_var   = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), var);
+    auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
+    auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+    auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+    auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+    auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+    add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
+    auto prog = optimize_onnx("batch_norm_1d_test.onnx");

     EXPECT(p == prog);
 }

+TEST_CASE(batch_norm_2d_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto x     = mm->add_parameter("x", {migraphx::shape::float_type, {2, 3, 4, 4}});
+    auto scale = mm->add_parameter("scale", {migraphx::shape::float_type, {3}});
+    auto bias  = mm->add_parameter("bias", {migraphx::shape::float_type, {3}});
+    auto mean  = mm->add_parameter("mean", {migraphx::shape::float_type, {3}});
+    auto var   = mm->add_parameter("variance", {migraphx::shape::float_type, {3}});
+    auto rt    = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+    auto eps   = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
+    auto usq_scale =
+        mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+    auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+    auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+    auto usq_var  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
+    auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
+    auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+    auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+    auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+    auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+    add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
+    auto prog = optimize_onnx("batch_norm_2d_test.onnx");
+
+    EXPECT(p == prog);
+}
+
+TEST_CASE(batch_norm_3d_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto x     = mm->add_parameter("x", {migraphx::shape::half_type, {2, 2, 2, 2, 2}});
+    auto scale = mm->add_parameter("scale", {migraphx::shape::half_type, {2}});
+    auto bias  = mm->add_parameter("bias", {migraphx::shape::half_type, {2}});
+    auto mean  = mm->add_parameter("mean", {migraphx::shape::half_type, {2}});
+    auto var   = mm->add_parameter("variance", {migraphx::shape::half_type, {2}});
+    auto rt    = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
+    auto eps   = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-6f}});
+    auto usq_scale =
+        mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), scale);
+    auto usq_bias =
+        mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), bias);
+    auto usq_mean =
+        mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), mean);
+    auto usq_var = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2, 3}}}), var);
+    auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
+    auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+    auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+    auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+    auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+    add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
+    auto prog = optimize_onnx("batch_norm_3d_test.onnx");
+
+    EXPECT(p == prog);
+}
+
+TEST_CASE(batch_norm_invalid_rank)
+{
+    EXPECT(test::throws([&] { migraphx::parse_onnx("batch_norm_invalid_rank.onnx"); }));
+}
+
+TEST_CASE(batch_norm_invalid_bias_rank)
+{
+    EXPECT(test::throws([&] { migraphx::parse_onnx("batch_norm_invalid_bias_rank.onnx"); }));
+}
+
 TEST_CASE(cast_test)
 {
     migraphx::program p;
@@ -791,18 +889,46 @@ TEST_CASE(conv_bn_relu_maxpool_test)
     auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
     auto l2 = mm->add_parameter("2", {migraphx::shape::float_type, {1}});
     auto p3 = mm->add_parameter("3", {migraphx::shape::float_type, {1}});
     auto p4 = mm->add_parameter("4", {migraphx::shape::float_type, {1}});
     auto p5 = mm->add_parameter("5", {migraphx::shape::float_type, {1}});
     auto p6 = mm->add_parameter("6", {migraphx::shape::float_type, {1}});
+    auto rt  = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+    auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});

     uint64_t axis = 1;
     auto l3 = mm->add_instruction(
         migraphx::make_op("convolution", {{"padding", {0, 0, 0, 0}}}), l0, l1);
     auto l4 = mm->add_instruction(
         migraphx::make_op("broadcast", {{"axis", axis}, {"out_lens", l3->get_shape().lens()}}),
         l2);
     auto l5 = mm->add_instruction(migraphx::make_op("add"), l3, l4);
-    auto l6 = mm->add_instruction(
-        migraphx::make_op("batch_norm_inference", {{"epsilon", 1.0e-5f}}), l5, p3, p4, p5, p6);
+    auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p3);
+    auto usq_bias  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p4);
+    auto usq_mean  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p5);
+    auto usq_var   = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), p6);
+    auto mb_mean   = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_mean);
+    auto numer  = mm->add_instruction(migraphx::make_op("sub"), l5, mb_mean);
+    auto mb_eps = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1}}}), eps);
+    auto var_eps = mm->add_instruction(migraphx::make_op("add"), usq_var, mb_eps);
+    auto mb_rt   = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1}}}), rt);
+    auto denom    = mm->add_instruction(migraphx::make_op("pow"), var_eps, mb_rt);
+    auto mb_denom = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), denom);
+    auto div0     = mm->add_instruction(migraphx::make_op("div"), numer, mb_denom);
+    auto mb_scale = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_scale);
+    auto r0      = mm->add_instruction(migraphx::make_op("mul"), div0, mb_scale);
+    auto mb_bias = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 28, 28}}}), usq_bias);
+    auto l6 = mm->add_instruction(migraphx::make_op("add"), r0, mb_bias);
     auto l7 = mm->add_instruction(migraphx::make_op("relu"), l6);
     mm->add_instruction(
         migraphx::make_op("pooling",
                           {{"mode", migraphx::op::pooling_mode::max},
```
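For reference, the expanded graphs these tests expect implement the standard ONNX BatchNormalization inference formula directly with elementwise operators, using pow(·, 0.5) in place of a dedicated square-root op:

$$ y = \frac{x - \text{mean}}{\sqrt{\text{variance} + \epsilon}} \cdot \text{scale} + \text{bias} $$

The unsqueeze steps give the per-channel scale, bias, mean, and variance tensors trailing singleton axes so the subsequent broadcasts line them up against the channel dimension of x.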
test/onnx/verify_onnx.cpp (view file @ a8eb886b)

```diff
@@ -30,6 +30,7 @@
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/verify.hpp>
 #include <migraphx/onnx.hpp>
+#include <migraphx/half.hpp>
 #include "test.hpp"

 TEST_CASE(averagepool_notset_test)
```

The second hunk (@@ -68,6 +69,196 @@ TEST_CASE(averagepool_nt_cip_test)) is purely additive, inserting four new test cases between the averagepool tests and `TEST_CASE(celu_verify_test)`:

```cpp
TEST_CASE(batch_norm_flat_test)
{
    migraphx::program p = migraphx::parse_onnx("batch_norm_flat_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape x_shape{migraphx::shape::float_type, {10}};
    migraphx::shape c_shape(migraphx::shape::float_type, {1});
    std::vector<float> x_data = {1.6524342,
                                 -0.51048076,
                                 0.32543048,
                                 2.4410043,
                                 2.0833702,
                                 0.44981122,
                                 1.0044622,
                                 -0.24006313,
                                 -0.43065986,
                                 0.07626268};
    std::vector<float> scale_data    = {-0.02927135};
    std::vector<float> bias_data     = {0.42347777};
    std::vector<float> mean_data     = {-0.00449735};
    std::vector<float> variance_data = {0.5184545};

    migraphx::parameter_map params;
    params["x"]        = migraphx::argument(x_shape, x_data.data());
    params["scale"]    = migraphx::argument(c_shape, scale_data.data());
    params["bias"]     = migraphx::argument(c_shape, bias_data.data());
    params["mean"]     = migraphx::argument(c_shape, mean_data.data());
    params["variance"] = migraphx::argument(c_shape, variance_data.data());

    auto result = p.eval(params).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {0.35612,
                               0.44404706,
                               0.4100655,
                               0.32406294,
                               0.33860153,
                               0.40500915,
                               0.38246143,
                               0.43305403,
                               0.4408022,
                               0.42019472};
    EXPECT(migraphx::verify_range(result_vector, gold));
}

TEST_CASE(batch_norm_1d_test)
{
    migraphx::program p = migraphx::parse_onnx("batch_norm_1d_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape x_shape{migraphx::shape::half_type, {2, 3, 4}};
    migraphx::shape c_shape(migraphx::shape::float_type, {3});
    std::vector<float> tmp = {1.652,     -0.5103, 0.3254,  2.441,   2.084,  0.4497,
                              1.005,     -0.2401, -0.4307, 0.07623, -0.02927, 0.4236,
                              -0.004498, -0.4282, -0.5527, 0.02205, -1.472, -1.7295,
                              0.796,     0.9507,  0.2312,  0.664,   -0.06964, 1.035};
    std::vector<migraphx::half> x_data{tmp.cbegin(), tmp.cend()};
    std::vector<float> scale_data    = {-1.336926, -1.0679098, 0.10368501};
    std::vector<float> bias_data     = {0.20240043, -0.70175606, -0.8859727};
    std::vector<float> mean_data     = {0.30854642, -0.36574763, -0.9463552};
    std::vector<float> variance_data = {0.43428132, 0.97773486, 0.30332062};

    migraphx::parameter_map params;
    params["x"]        = migraphx::argument(x_shape, x_data.data());
    params["scale"]    = migraphx::argument(c_shape, scale_data.data());
    params["bias"]     = migraphx::argument(c_shape, bias_data.data());
    params["mean"]     = migraphx::argument(c_shape, mean_data.data());
    params["variance"] = migraphx::argument(c_shape, variance_data.data());

    auto result = p.eval(params).back();
    std::vector<migraphx::half> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    tmp = {-2.523, 1.863,   0.1681,  -4.125, -3.348, -1.582, -2.182,  -0.8374,
           -0.789, -0.6934, -0.7134, -0.628, 0.8374, 1.697,  1.949,   0.7837,
           0.4927, 0.771,   -1.956,  -2.123, -0.664, -0.583, -0.7207, -0.5127};
    std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
    EXPECT(migraphx::verify_range(result_vector, gold));
}

TEST_CASE(batch_norm_2d_test)
{
    migraphx::program p = migraphx::parse_onnx("batch_norm_2d_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape x_shape{migraphx::shape::float_type, {2, 3, 4, 4}};
    migraphx::shape c_shape(migraphx::shape::float_type, {3});
    std::vector<float> x_data = {
        1.6524342,   -0.51048076, 0.32543048,  2.4410043,   2.0833702,   0.44981122,
        1.0044622,   -0.24006313, -0.43065986, 0.07626268,  -0.02927135, 0.42347777,
        -0.00449735, -0.4281568,  -0.5527635,  0.02204161,  -1.4719028,  -1.7298799,
        0.79596406,  0.9505461,   0.23115851,  0.6639593,   -0.06963254, 1.0348768,
        -1.336926,   -1.0679098,  0.10368501,  0.20240043,  -0.70175606, -0.8859727,
        0.30854642,  -0.36574763, -0.9463552,  0.9476916,   0.37686515,  -0.05184272,
        -0.7151244,  -0.37341377, 0.59440356,  0.10051094,  -0.20755945, 0.9098465,
        1.1664004,   1.4075205,   -1.1522529,  -0.34607422, 0.32027543,  -0.6885485,
        0.5404544,   0.10012514,  0.8767704,   1.0032021,   -1.2755303,  0.23577735,
        0.74239916,  1.0146079,   0.60875916,  -0.29163074, 1.4872868,   0.20466477,
        -0.26367408, -0.56394804, -0.56043875, 0.7763664,   -0.9626441,  0.29653943,
        -3.2231965,  0.03322164,  0.03402911,  0.77308357,  -0.0654009,  -0.30463725,
        0.22182712,  -0.22594836, -0.5807543,  -0.22390617, -0.24484141, -2.0761833,
        1.8459716,   0.2455878,   0.99913245,  -0.9266217,  -0.1938893,  0.6417983,
        -1.0880078,  0.49565446,  2.1584804,   1.2276239,   3.3091128,   0.14217089,
        0.9425477,   0.07578196,  0.4067431,   0.71984154,  -0.20796849, 0.90003085};
    std::vector<float> scale_data    = {0.658487, 0.03700604, 2.463201};
    std::vector<float> bias_data     = {0.03497279, 0.17080553, 0.5636415};
    std::vector<float> mean_data     = {0.1954783, 0.6203974, 0.8116831};
    std::vector<float> variance_data = {0.30558077, 0.04536599, 0.05461315};

    migraphx::parameter_map params;
    params["x"]        = migraphx::argument(x_shape, x_data.data());
    params["scale"]    = migraphx::argument(c_shape, scale_data.data());
    params["bias"]     = migraphx::argument(c_shape, bias_data.data());
    params["mean"]     = migraphx::argument(c_shape, mean_data.data());
    params["variance"] = migraphx::argument(c_shape, variance_data.data());

    auto result = p.eval(params).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {
        1.77046824e+00,  -8.05950999e-01, 1.89769119e-01,  2.70979643e+00,
        2.28379035e+00,  3.37928861e-01,  9.98617530e-01,  -4.83835101e-01,
        -7.10869908e-01, -1.07034385e-01, -2.32744321e-01, 3.06560963e-01,
        -2.03234047e-01, -7.07888365e-01, -8.56317282e-01, -1.71621382e-01,
        -1.92677066e-01, -2.37493858e-01, 2.01305658e-01,  2.28160262e-01,
        1.03185430e-01,  1.78373277e-01,  5.09308279e-02,  2.42810518e-01,
        -1.69228360e-01, -1.22493818e-01, 8.10402334e-02,  9.81894583e-02,
        -5.88841513e-02, -9.08869803e-02, 1.16629556e-01,  -5.11445105e-04,
        -1.79648399e+01, 1.99707508e+00,  -4.01903248e+00, -8.53731060e+00,
        -1.55278311e+01, -1.19264421e+01, -1.72633123e+00, -6.93161058e+00,
        -1.01784554e+01, 1.59821415e+00,  4.30211163e+00,  6.84334660e+00,
        -2.01348572e+01, -1.16383028e+01, -4.61544800e+00, -1.52477398e+01,
        4.45901126e-01,  -7.86099210e-02, 8.46513629e-01,  9.97116446e-01,
        -1.71726203e+00, 8.29761624e-02,  6.86453462e-01,  1.01070285e+00,
        5.27264357e-01,  -5.45261383e-01, 1.57374811e+00,  4.59154993e-02,
        -5.11959970e-01, -8.69639993e-01, -8.65459919e-01, 7.26914644e-01,
        -1.04206637e-01, 1.14543661e-01,  -4.96918678e-01, 6.87990561e-02,
        6.89393356e-02,  1.97330773e-01,  5.16659655e-02,  1.01048872e-02,
        1.01564340e-01,  2.37750299e-02,  -3.78632471e-02, 2.41298079e-02,
        2.04928555e-02,  -2.97655046e-01, 3.83717060e-01,  1.05692141e-01,
        2.53922558e+00,  -1.77568626e+01, -1.00343809e+01, -1.22682428e+00,
        -1.94577579e+01, -2.76707697e+00, 1.47579327e+01,  4.94736385e+00,
        2.68847847e+01,  -6.49254417e+00, 1.94286156e+00,  -7.19223642e+00,
        -3.70413971e+00, -4.04303551e-01, -1.01827660e+01, 1.49476433e+00};
    EXPECT(migraphx::verify_range(result_vector, gold));
}

TEST_CASE(batch_norm_3d_test)
{
    migraphx::program p = migraphx::parse_onnx("batch_norm_3d_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape x_shape{migraphx::shape::half_type, {2, 2, 2, 2, 2}};
    migraphx::shape c_shape(migraphx::shape::half_type, {2});
    // using migraphx::half copy conversion since it doesn't have initializer_list constructor
    std::vector<float> tmp = {5., 5., 8., 7., 3., 4., 1., 7., 5., 5., 9., 4., 7., 2., 2., 2.,
                              6., 1., 4., 9., 2., 8., 0., 2., 1., 4., 8., 8., 3., 3., 0., 8.};
    std::vector<migraphx::half> x_data{tmp.cbegin(), tmp.cend()};
    tmp = {1., 1.};
    std::vector<migraphx::half> scale_data{tmp.cbegin(), tmp.cend()};
    tmp = {
        0.,
        0.,
    };
    std::vector<migraphx::half> bias_data{tmp.cbegin(), tmp.cend()};
    tmp = {-0.75, 0.29};
    std::vector<migraphx::half> mean_data{tmp.cbegin(), tmp.cend()};
    tmp = {0.31, 0.37};
    std::vector<migraphx::half> variance_data{tmp.cbegin(), tmp.cend()};

    migraphx::parameter_map params;
    params["x"]        = migraphx::argument(x_shape, x_data.data());
    params["scale"]    = migraphx::argument(c_shape, scale_data.data());
    params["bias"]     = migraphx::argument(c_shape, bias_data.data());
    params["mean"]     = migraphx::argument(c_shape, mean_data.data());
    params["variance"] = migraphx::argument(c_shape, variance_data.data());

    auto result = p.eval(params).back();
    std::vector<migraphx::half> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    tmp = {10.33, 10.33, 15.71, 13.914, 6.734, 8.53,  3.143,   13.914, 7.742, 7.742, 14.32,
           6.098, 11.03, 2.81,  2.81,   2.81,  12.125, 3.143,  8.53,   17.52, 4.938, 15.71,
           1.347, 4.938, 1.167, 6.098,  12.67, 12.67,  4.453,  4.453,  -0.4768, 12.67};
    std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
    EXPECT(migraphx::verify_range(result_vector, gold));
}
```
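Several of these tests build `migraphx::half` inputs by round-tripping through a `std::vector<float>`, since, as the in-diff comment notes, `migraphx::half` lacks an initializer_list constructor. The idiom in isolation, as a small sketch:

```cpp
#include <migraphx/half.hpp>
#include <vector>

std::vector<migraphx::half> to_half(const std::vector<float>& tmp)
{
    // Range construction converts each float element to half one by one.
    return {tmp.cbegin(), tmp.cend()};
}
```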
test/py/CMakeLists.txt (view file @ a8eb886b)

```diff
@@ -56,4 +56,5 @@ add_py_test(gpu_offload test_gpu_offload.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
 add_py_test(gpu test_gpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
 add_py_test(array test_array.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
 add_py_test(backend onnx_backend_test.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
+add_py_test(gpu_async test_gpu_async.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
 endif()
```