gaoqiong / MIGraphX / Commits

Commit 32b83c9c, authored Sep 25, 2023 by Khalique Ahmed

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into inner_bcast_fix

Parents: 92f5a6cd, 434a06cf
Changes: 291. Showing 20 changed files with 370 additions and 162 deletions (+370, -162).
src/targets/gpu/target.cpp           +3    -0
src/targets/gpu/time_op.cpp          +1    -1
src/targets/ref/lowering.cpp         +1    -1
src/tf/tf_parser.cpp                 +1    -1
src/value.cpp                        +1    -1
src/verify_args.cpp                  +1    -11
test/CMakeLists.txt                  +26   -102
test/api/test_cpu.cpp                +3    -3
test/check_shapes_test.cpp           +30   -15
test/common_dims.cpp                 +65   -0
test/eliminate_contiguous_test.cpp   +34   -2
test/eliminate_pad_test.cpp          +3    -4
test/float_equal.cpp                 +3    -0
test/fuse_pointwise.cpp              +152  -1
test/gpu/jit.cpp                     +9    -0
test/gpu/mlir.cpp                    +7    -7
test/gpu/quantization.cpp            +2    -2
test/include/test.hpp                +22   -5
test/inline_module_test.cpp          +0    -1
test/insert_pad_test.cpp             +6    -5
src/targets/gpu/target.cpp

@@ -48,6 +48,7 @@
 #include <migraphx/rewrite_quantization.hpp>
 #include <migraphx/rewrite_rnn.hpp>
 #include <migraphx/schedule.hpp>
+#include <migraphx/simplify_dyn_ops.hpp>
 #include <migraphx/simplify_qdq.hpp>
 #include <migraphx/simplify_reshapes.hpp>
 #include <migraphx/split_single_dyn_dim.hpp>

@@ -109,6 +110,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
 {
     split_single_dyn_dim{},
     dead_code_elimination{},
+    simplify_dyn_ops{},
+    dead_code_elimination{},
     normalize_ops{},
     dead_code_elimination{},
     simplify_qdq{},
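The two new pipeline entries follow the pattern already visible in this hunk: every simplification pass is chased by a dead_code_elimination step so that instructions orphaned by the previous pass are dropped before the next one runs. Below is a minimal standalone C++ sketch of that scheduling idea, using placeholder pass and module types rather than the real MIGraphX pass API.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Placeholder module: just records which passes ran over it.
struct module_stub
{
    std::vector<std::string> log;
};

using pass = std::function<void(module_stub&)>;

// Minimal sequential pass runner; MIGraphX's pass manager additionally handles
// tracing and validation between passes.
void run_passes(module_stub& m, const std::vector<pass>& passes)
{
    for(const auto& p : passes)
        p(m);
}

int main()
{
    module_stub m;
    auto named = [](const std::string& name) {
        return [name](module_stub& mod) { mod.log.push_back(name); };
    };
    run_passes(m,
               {named("split_single_dyn_dim"),
                named("dead_code_elimination"),
                named("simplify_dyn_ops"),
                named("dead_code_elimination")});
    for(const auto& entry : m.log)
        std::cout << entry << "\n";
}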
src/targets/gpu/time_op.cpp

@@ -34,7 +34,7 @@ namespace gpu {
 std::vector<argument> generate_arguments(const std::vector<shape>& shapes, unsigned long seed = 0)
 {
     std::vector<argument> args;
-    std::transform(shapes.begin(), shapes.end(), std::back_inserter(args), [&](auto& s) {
+    std::transform(shapes.begin(), shapes.end(), std::back_inserter(args), [&](const auto& s) {
         return to_gpu(generate_argument(s, seed++));
     });
     return args;
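The only change here is taking the lambda parameter by const reference, since the shape is only read. A minimal standalone sketch of the same std::transform/back_inserter idiom, with plain std::string standing in for the MIGraphX shape and argument types:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> shapes = {"float32[3,4]", "float32[8]", "int64[2,2]"};
    unsigned long seed = 0;

    std::vector<std::string> args;
    // One output element per input shape; the element is read-only, so the
    // lambda takes it by const reference, mirroring the change above.
    std::transform(shapes.begin(), shapes.end(), std::back_inserter(args), [&](const auto& s) {
        // seed++ mirrors the per-argument seed increment in the original lambda
        return s + " (seed " + std::to_string(seed++) + ")";
    });

    for(const auto& a : args)
        std::cout << a << "\n";
}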
src/targets/ref/lowering.cpp

 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
src/tf/tf_parser.cpp

@@ -338,7 +338,7 @@ void tf_parser::parse_node(const std::string& name)
 std::string input_name = input;
 // if input has trailing `:0` index then remove it
 auto multi_out_idx = input.find(':');
-if(multi_out_idx != std::string::npos && input.substr(multi_out_idx + 1) == "0")
+if(multi_out_idx != std::string::npos and input.substr(multi_out_idx + 1) == "0")
 {
     input_name = input.substr(0, multi_out_idx);
 }
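For clarity, here is the same ':0' stripping logic as a standalone sketch; strip_output_index is an illustrative helper, not a MIGraphX function:

#include <iostream>
#include <string>

// Remove a trailing ":0" output index from a TensorFlow-style input name,
// matching the condition in the hunk above; any other index is kept.
std::string strip_output_index(const std::string& input)
{
    auto multi_out_idx = input.find(':');
    if(multi_out_idx != std::string::npos and input.substr(multi_out_idx + 1) == "0")
        return input.substr(0, multi_out_idx);
    return input;
}

int main()
{
    std::cout << strip_output_index("conv1:0") << "\n"; // prints "conv1"
    std::cout << strip_output_index("split:1") << "\n"; // prints "split:1" (kept)
}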
src/value.cpp

@@ -285,7 +285,7 @@ bool value::contains(const std::string& pkey) const
 }
 std::size_t value::size() const
 {
-    auto* a = if_array_impl(x);
+    const auto* a = if_array_impl(x);
     if(a == nullptr)
         return 0;
     return a->size();
src/verify_args.cpp

@@ -78,16 +78,6 @@ bool verify_args(const std::string& name,
 if(verify::range_zero(target))
     std::cout << "Target data is all zeros" << std::endl;
-// auto mxdiff = max_diff(ref, target);
-// std::cout << "Max diff: " << mxdiff << std::endl;
-// auto idx = mismatch_idx(ref, target, float_equal);
-// if(idx < verify::range_distance(ref))
-// {
-//     std::cout << "Mismatch at " << idx << ": " << ref[idx] << " != " << target[idx]
-//               << std::endl;
-// }
 auto ref_nan_idx = find_idx(ref, verify::not_finite);
 if(ref_nan_idx >= 0)
     std::cout << "Non finite number found in ref at " << ref_nan_idx << ": "

@@ -97,7 +87,7 @@ bool verify_args(const std::string& name,
 if(target_nan_idx >= 0)
     std::cout << "Non finite number found in target at " << target_nan_idx << ": "
               << target[target_nan_idx] << std::endl;
-// std::cout << std::endl;
+std::cout << "MIGraphX verification passed successfully." << std::endl;
 }
 });
 return passed;
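The surviving checks report the first non-finite element in either buffer. A hedged sketch of that kind of scan in plain C++ (find_first_nonfinite is illustrative, not the MIGraphX find_idx helper):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Return the index of the first non-finite value, or -1 if all values are finite.
std::ptrdiff_t find_first_nonfinite(const std::vector<float>& data)
{
    auto it = std::find_if(data.begin(), data.end(), [](float x) { return not std::isfinite(x); });
    return it == data.end() ? -1 : std::distance(data.begin(), it);
}

int main()
{
    std::vector<float> target = {1.0f, 2.0f, NAN, 4.0f};
    auto idx = find_first_nonfinite(target);
    if(idx >= 0)
        std::cout << "Non finite number found in target at " << idx << "\n";
}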
test/CMakeLists.txt

@@ -25,98 +25,19 @@
 cmake_policy(SET CMP0057 NEW)

 find_package(Threads REQUIRED)

-include(ProcessorCount)
-ProcessorCount(N)
-set(CTEST_PARALLEL_LEVEL ${N} CACHE STRING "CTest parallel level")
-add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -j ${CTEST_PARALLEL_LEVEL} -C ${CMAKE_CFG_INTDIR} --timeout 5000)
-add_custom_target(tests)
-
-find_program(MIGRAPHX_GDB gdb)
-if(MIGRAPHX_GDB)
-    set(MIGRAPHX_TEST_GDB On CACHE BOOL "")
-else()
-    set(MIGRAPHX_TEST_GDB Off CACHE BOOL "")
-endif()
-
-set(SKIP_TESTS)
-
-function(add_test_command NAME EXE)
-    if(NAME IN_LIST SKIP_TESTS)
-        add_test(NAME ${NAME} COMMAND echo skipped)
-        set_tests_properties(${NAME} PROPERTIES DISABLED On)
-    elseif(WIN32)
-        set(WINPATH)
-        foreach(PATH ${CMAKE_FIND_ROOT_PATH})
-            list(APPEND WINPATH ${PATH}/bin)
-        endforeach()
-        file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/test_${NAME}.cmd" CONTENT "set PATH=${WINPATH};%PATH%
-%1 ${ARGN}")
-        add_test(NAME ${NAME} COMMAND ${WINE_CMD} cmd /c "${CMAKE_CURRENT_BINARY_DIR}/test_${NAME}.cmd" $<TARGET_FILE:${EXE}>)
-    else()
-        if(MIGRAPHX_TEST_GDB)
-            # add_test(NAME ${NAME} COMMAND ${MIGRAPHX_GDB}
-            #     --batch
-            #     --return-child-result
-            #     -ex "set disable-randomization off"
-            #     -ex run
-            #     -ex backtrace
-            #     --args $<TARGET_FILE:${EXE}> ${ARGN})
-            set(TEST_DIR ${CMAKE_CURRENT_BINARY_DIR}/gdb/test_${NAME})
-            file(MAKE_DIRECTORY ${TEST_DIR})
-            if(NOT EXISTS ${TEST_DIR})
-                message(FATAL_ERROR "Failed to create test directory: ${TEST_DIR}")
-            endif()
-            file(GENERATE OUTPUT "${TEST_DIR}/run.cmake" CONTENT "
-                # Remove previous core dump
-                file(REMOVE ${TEST_DIR}/core)
-                execute_process(COMMAND $<TARGET_FILE:${EXE}> ${ARGN} WORKING_DIRECTORY ${TEST_DIR} RESULT_VARIABLE RESULT)
-                if(NOT RESULT EQUAL 0)
-                    # TODO: check for core files based on pid when setting /proc/sys/kernel/core_uses_pid
-                    if(EXISTS ${TEST_DIR}/core)
-                        set(\$ENV{UBSAN_OPTIONS} print_stacktrace=1)
-                        set(\$ENV{ASAN_OPTIONS} print_stacktrace=1)
-                        execute_process(COMMAND ${MIGRAPHX_GDB} $<TARGET_FILE:${EXE}> ${TEST_DIR}/core -batch -ex bt)
-                    endif()
-                    message(FATAL_ERROR \"Test failed\")
-                endif()
-            ")
-            add_test(NAME ${NAME} COMMAND ${CMAKE_COMMAND} -P "${TEST_DIR}/run.cmake")
-        else()
-            add_test(NAME ${NAME} COMMAND ${EXE} ${ARGN})
-        endif()
-    endif()
+rocm_test_link_libraries(Threads::Threads migraphx migraphx_ref migraphx_onnx migraphx_tf)
+rocm_test_include_directories(include)
-    set_tests_properties(${NAME} PROPERTIES FAIL_REGULAR_EXPRESSION "FAILED")
-endfunction()
-
-function(add_test_executable TEST_NAME)
-    add_executable(${TEST_NAME} EXCLUDE_FROM_ALL ${ARGN})
-    target_link_libraries(${TEST_NAME} ${CMAKE_THREAD_LIBS_INIT})
-    # Cmake does not add flags correctly for gcc
-    if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
-        set_target_properties(${TEST_NAME} PROPERTIES COMPILE_FLAGS -pthread LINK_FLAGS -pthread)
-    endif()
-    set(TEST_COMMAND ${TEST_NAME})
-    add_test_command(${TEST_NAME} ${TEST_COMMAND})
-    add_dependencies(tests ${TEST_NAME})
-    add_dependencies(check ${TEST_NAME})
-    target_link_libraries(${TEST_NAME} migraphx migraphx_onnx migraphx_ref)
-    target_include_directories(${TEST_NAME} PUBLIC include)
-endfunction(add_test_executable)

 set(MIGRAPHX_DISABLE_LARGE_BUFFER_TESTS Off CACHE BOOL "")
 if(MIGRAPHX_DISABLE_LARGE_BUFFER_TESTS)
     add_compile_definitions(MIGRAPHX_DISABLE_LARGE_BUFFER_TESTS)
 endif()

 file(GLOB TESTS CONFIGURE_DEPENDS *.cpp)
 foreach(TEST ${TESTS})
     get_filename_component(BASE_NAME ${TEST} NAME_WE)
-    add_test_executable(test_${BASE_NAME} ${TEST})
+    rocm_add_test_executable(test_${BASE_NAME} ${TEST})
     rocm_clang_tidy_check(test_${BASE_NAME})
 endforeach()

@@ -126,7 +47,7 @@ if(MIGRAPHX_ENABLE_GPU)
 foreach(TEST ${GPU_TESTS})
     get_filename_component(BASE_NAME ${TEST} NAME_WE)
-    add_test_executable(test_gpu_${BASE_NAME} ${TEST})
+    rocm_add_test_executable(test_gpu_${BASE_NAME} ${TEST})
     rocm_clang_tidy_check(test_gpu_${BASE_NAME})
     set_tests_properties(test_gpu_${BASE_NAME} PROPERTIES
         COST 10

@@ -145,7 +66,7 @@ if(MIGRAPHX_ENABLE_FPGA)
 foreach(TEST ${FPGA_TESTS})
     get_filename_component(BASE_NAME ${TEST} NAME_WE)
-    add_test_executable(test_fpga_${BASE_NAME} ${TEST})
+    rocm_add_test_executable(test_fpga_${BASE_NAME} ${TEST})
     rocm_clang_tidy_check(test_fpga_${BASE_NAME})
     set_tests_properties(test_fpga_${BASE_NAME} PROPERTIES
         COST 10

@@ -167,22 +88,21 @@ foreach(ONNX_TEST ${ONNX_TESTS})
     target_link_libraries(${TEST_NAME} migraphx_onnx migraphx_ref)
     target_include_directories(${TEST_NAME} PUBLIC include)
     add_test(NAME ${TEST_NAME} COMMAND $<TARGET_FILE:${TEST_NAME}> WORKING_DIRECTORY ${TEST_ONNX_DIR})
-    add_dependencies(tests ${TEST_NAME})
-    add_dependencies(check ${TEST_NAME})
+    rocm_mark_as_test(${TEST_NAME})
 endforeach()

 # tf test
 set(TEST_TF_DIR ${CMAKE_CURRENT_SOURCE_DIR}/tf)
 add_executable(test_tf tf/tf_test.cpp)
+rocm_mark_as_test(test_tf)
 rocm_clang_tidy_check(test_tf)
 target_link_libraries(test_tf migraphx_tf)
 target_include_directories(test_tf PUBLIC include)
 add_test(NAME test_tf COMMAND $<TARGET_FILE:test_tf> WORKING_DIRECTORY ${TEST_TF_DIR})
-add_dependencies(tests test_tf)
-add_dependencies(check test_tf)

 add_subdirectory(api)
 add_subdirectory(verify)
 add_subdirectory(ref)
 if(MIGRAPHX_ENABLE_PYTHON)
     add_subdirectory(py)

@@ -201,20 +121,24 @@ if(MIGRAPHX_ENABLE_GPU AND MIGRAPHX_ENABLE_CPU AND MIGRAPHX_ENABLE_FPGA)
     target_link_libraries(${TEST_NAME} migraphx migraphx_onnx migraphx_tf migraphx_all_targets)
     target_include_directories(${TEST_NAME} PUBLIC include)
     add_test(NAME ${TEST_NAME} COMMAND $<TARGET_FILE:${TEST_NAME}> WORKING_DIRECTORY ${TEST_MULTI_TARGET_DIR})
-    add_dependencies(tests ${TEST_NAME})
-    add_dependencies(check ${TEST_NAME})
+    rocm_mark_as_test(${TEST_NAME})
 endforeach()
 endif()

 function(test_header NAME HEADER)
-    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/header-main-include-${NAME}.cpp
-        "#include <${HEADER}>\nint main() {}\n")
+    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/header-main-include-${NAME}.cpp "
+#include <${HEADER}>
+int main() {}
+")
-    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/header-static-include-${NAME}.cpp
-        "#include <${HEADER}>\n")
+    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/header-static-include-${NAME}.cpp "
+#include <${HEADER}>
+#if defined(min) || defined(max) || defined(near) || defined(far)
+#error \"Do not include windows.h in header files\"
+#endif
+")
-    add_test_executable(${NAME}
+    rocm_add_test_executable(${NAME}
         ${CMAKE_CURRENT_BINARY_DIR}/header-main-include-${NAME}.cpp
         ${CMAKE_CURRENT_BINARY_DIR}/header-static-include-${NAME}.cpp)

@@ -236,13 +160,13 @@ test_headers(migraphx ${CMAKE_SOURCE_DIR}/src/include/migraphx/*.hpp)
 test_headers(migraphx/ref ${CMAKE_SOURCE_DIR}/src/targets/ref/include/migraphx/ref/*.hpp)
 if(MIGRAPHX_ENABLE_GPU)
-    test_headers(migraphx/gpu ${CMAKE_SOURCE_DIR}/src/targets/gpu/include/migraphx/gpu/*.hpp)
+    test_headers(migraphx/gpu HEADERS ${CMAKE_SOURCE_DIR}/src/targets/gpu/include/migraphx/gpu/*.hpp DEPENDS migraphx_gpu)
 endif()
 if(MIGRAPHX_ENABLE_CPU)
-    test_headers(migraphx/cpu ${CMAKE_SOURCE_DIR}/src/targets/cpu/include/migraphx/cpu/*.hpp)
+    test_headers(migraphx/cpu HEADERS ${CMAKE_SOURCE_DIR}/src/targets/cpu/include/migraphx/cpu/*.hpp migraphx_cpu)
 endif()
 if(MIGRAPHX_ENABLE_FPGA)
-    test_headers(migraphx/fpga ${CMAKE_SOURCE_DIR}/src/targets/fpga/include/migraphx/fpga/*.hpp)
+    test_headers(migraphx/fpga HEADERS ${CMAKE_SOURCE_DIR}/src/targets/fpga/include/migraphx/fpga/*.hpp migraphx_fpga)
 endif()
test/api/test_cpu.cpp

@@ -145,15 +145,15 @@ TEST_CASE(zero_parameter)
 TEST_CASE(set_scalar_parameter)
 {
-    auto p1 = migraphx::parse_onnx("add_bcast_test.onnx");
-    migraphx::shape s1(migraphx_shape_float_type, {3, 4});
+    auto p1 = migraphx::parse_onnx("implicit_add_bcast_test.onnx");
+    migraphx::shape s1(migraphx_shape_float_type, {3, 4, 1});
     auto param_shapes = p1.get_parameter_shapes();
     auto s1_orig      = param_shapes["1"];
     CHECK(bool{s1 == s1_orig});

     migraphx::onnx_options option;
     option.set_input_parameter_shape("1", {});
-    auto p2 = migraphx::parse_onnx("add_bcast_test.onnx", option);
+    auto p2 = migraphx::parse_onnx("implicit_add_bcast_test.onnx", option);
     migraphx::shape s_scalar(migraphx_shape_float_type);
     auto param_shapes_1 = p2.get_parameter_shapes();
     auto s_scalar_after = param_shapes_1["1"];
test/check_shapes_test.cpp

@@ -31,24 +31,39 @@
 using migraphx::shape;

-bool create_shapes(bool dynamic_allowed)
+void create_shapes(bool dynamic_allowed)
 {
-    try
-    {
-        shape a{shape::int64_type, {3}};
-        shape b{shape::float_type, {{3, 6}, {4, 4}}};
-        auto op = migraphx::make_op("add");
-        migraphx::check_shapes{{a, b}, op, dynamic_allowed}.has(2);
-        return true;
-    }
-    catch(...)
-    {
-        return false;
-    }
+    shape a{shape::int64_type, {3}};
+    shape b{shape::float_type, {{3, 6}, {4, 4}}};
+    migraphx::check_shapes{{a, b}, "", dynamic_allowed}.has(2);
 }

-TEST_CASE(allow_dynamic_shape) { EXPECT(create_shapes(true)); }
+TEST_CASE(allow_dynamic_shape) { EXPECT(not test::throws([] { create_shapes(true); })); }

-TEST_CASE(fail_dynamic_shape) { EXPECT(not create_shapes(false)); }
+TEST_CASE(fail_dynamic_shape) { EXPECT(test::throws([] { create_shapes(false); })); }

+TEST_CASE(same_layout_fail)
+{
+    EXPECT(test::throws([] {
+        shape a{shape::float_type, {2, 3}};
+        shape b{shape::float_type, {2, 3}, {1, 2}};
+        migraphx::check_shapes{{a, b}, ""}.same_layout();
+    }));
+}
+
+TEST_CASE(same_layout_pass)
+{
+    EXPECT(not test::throws([] {
+        shape a{shape::float_type, {2, 3}, {1, 2}};
+        shape b{shape::float_type, {2, 3}, {1, 2}};
+        migraphx::check_shapes{{a, b}, ""}.same_layout();
+    }));
+}

 int main(int argc, const char* argv[]) { test::run(argc, argv); }
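The rewritten tests assert on whether create_shapes throws instead of on a boolean return. A standalone sketch of the throws() pattern (mirroring, but not reproducing, the test::throws helper used above):

#include <iostream>
#include <stdexcept>

// Run a callable and report whether it threw anything.
template <class F>
bool throws(F f)
{
    try
    {
        f();
        return false;
    }
    catch(...)
    {
        return true;
    }
}

int main()
{
    std::cout << std::boolalpha;
    std::cout << throws([] { throw std::runtime_error("shape mismatch"); }) << "\n"; // true
    std::cout << throws([] { /* no exception */ }) << "\n";                          // false
}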
test/common_dims.cpp (new file, mode 100644)

/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <migraphx/common_dims.hpp>
#include <test.hpp>

using axes_map = std::vector<std::vector<std::size_t>>;

TEST_CASE(common_d1_less)
{
    auto cd = migraphx::common_dims::compute({2, 32, 40, 8}, {2, 1280, 8});
    EXPECT(cd.dims == std::vector<std::size_t>{2, 32, 40, 8});
    EXPECT(cd.axes_map1 == axes_map{{0}, {1}, {2}, {3}});
    EXPECT(cd.axes_map2 == axes_map{{0}, {1, 2}, {3}});
}

TEST_CASE(common1)
{
    auto cd = migraphx::common_dims::compute({2, 32, 2560}, {2, 1280, 8, 8});
    EXPECT(cd.dims == std::vector<std::size_t>{2, 32, 40, 8, 8});
    EXPECT(cd.axes_map1 == axes_map{{0}, {1}, {2, 3, 4}});
    EXPECT(cd.axes_map2 == axes_map{{0}, {1, 2}, {3}, {4}});
}

TEST_CASE(common2)
{
    auto cd = migraphx::common_dims::compute({2, 1280, 8, 8}, {2, 32, 2560});
    EXPECT(cd.dims == std::vector<std::size_t>{2, 32, 40, 8, 8});
    EXPECT(cd.axes_map1 == axes_map{{0}, {1, 2}, {3}, {4}});
    EXPECT(cd.axes_map2 == axes_map{{0}, {1}, {2, 3, 4}});
}

TEST_CASE(common_error1)
{
    auto cd = migraphx::common_dims::compute({6, 35}, {3, 7, 2, 5});
    EXPECT(cd.dims.empty());
}

TEST_CASE(common_error2)
{
    auto cd = migraphx::common_dims::compute({3, 7, 2, 5}, {6, 35});
    EXPECT(cd.dims.empty());
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
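The invariant behind these expectations: each axes-map entry lists the common-dims axes that one original axis was split into, so the product of the mapped common dims must reproduce that original dimension. A small self-contained check using the values from TEST_CASE(common1); the helper loop is illustrative only, not MIGraphX code:

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    const std::vector<std::size_t> dims1  = {2, 32, 2560};
    const std::vector<std::size_t> common = {2, 32, 40, 8, 8};
    const std::vector<std::vector<std::size_t>> axes_map1 = {{0}, {1}, {2, 3, 4}};

    for(std::size_t i = 0; i < dims1.size(); i++)
    {
        std::size_t prod = 1;
        for(auto axis : axes_map1[i])
            prod *= common[axis];
        assert(prod == dims1[i]); // e.g. 40 * 8 * 8 == 2560
    }
}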
test/eliminate_contiguous_test.cpp

@@ -196,15 +196,47 @@ TEST_CASE(contiguous_pointwise)
         migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {2, 3, 8, 8}}}), y);
     auto yc  = mm->add_instruction(migraphx::make_op("contiguous"), yb);
     auto add = add_pointwise(p, "main:pointwise0", {x, yc}, single_pointwise("add"));
-    mm->add_instruction(pass_op{}, add);
+    auto cadd = mm->add_instruction(migraphx::make_op("contiguous"), add);
+    mm->add_instruction(pass_op{}, cadd);
     }
     auto count = std::distance(mm->begin(), mm->end());
     run_pass(*mm);
-    EXPECT(std::distance(mm->begin(), mm->end()) == (count - 1));
+    EXPECT(std::distance(mm->begin(), mm->end()) == (count - 2));
     EXPECT(std::none_of(
         mm->begin(), mm->end(), [](auto&& ins) { return ins.name() == "contiguous"; }));
 }

+TEST_CASE(contiguous_nhwc_pointwise)
+{
+    auto s = migraphx::shape::from_permutation(
+        migraphx::shape::float_type, {2, 3, 8, 8}, {0, 2, 3, 1});
+    migraphx::program p1;
+    {
+        auto* mm = p1.get_main_module();
+        auto x   = mm->add_parameter("x", s);
+        auto y   = mm->add_parameter("y", migraphx::shape{migraphx::shape::float_type, {3}});
+        auto yb  = mm->add_instruction(
+            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {2, 3, 8, 8}}}), y);
+        auto yc   = mm->add_instruction(migraphx::make_op("contiguous"), yb);
+        auto add  = add_pointwise(p1, "main:pointwise0", {x, yc}, single_pointwise("add"));
+        auto cadd = mm->add_instruction(migraphx::make_op("contiguous"), add);
+        mm->add_instruction(pass_op{}, cadd);
+    }
+    run_pass(*p1.get_main_module());
+
+    migraphx::program p2;
+    {
+        auto* mm = p2.get_main_module();
+        auto x   = mm->add_parameter("x", s);
+        auto y   = mm->add_parameter("y", migraphx::shape{migraphx::shape::float_type, {3}});
+        auto yb  = mm->add_instruction(
+            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {2, 3, 8, 8}}}), y);
+        auto add  = add_pointwise(p2, "main:pointwise0", {x, yb}, single_pointwise("add"));
+        auto cadd = mm->add_instruction(migraphx::make_op("contiguous"), add);
+        mm->add_instruction(pass_op{}, cadd);
+    }
+    EXPECT(p1 == p2);
+}

 TEST_CASE(slice_contiguous)
 {
     migraphx::module m;
test/eliminate_pad_test.cpp

@@ -27,7 +27,7 @@
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/instruction.hpp>
 #include <basic_ops.hpp>
-#include <migraphx/operators.hpp>
+#include <migraphx/op/common.hpp>
 #include <migraphx/make_op.hpp>
 #include <test.hpp>

@@ -58,9 +58,8 @@ create_conv(migraphx::instruction_ref& l_img,
     migraphx::shape s_weights{migraphx::shape::int32_type, {4, channels, 3, 3}};
     std::vector<int32_t> weights(4 * channels * 3 * 3);
     auto l_weights = m.add_literal(migraphx::literal{s_weights, weights});
-    migraphx::op::convolution op;
-    op.padding_mode = padding_mode;
-    return m.add_instruction(op, l_img, l_weights);
+    return m.add_instruction(
+        migraphx::make_op("convolution", {{"padding_mode", padding_mode}}), l_img, l_weights);
 }

 TEST_CASE(rewrite_pad)
test/float_equal.cpp

@@ -112,7 +112,10 @@ TEST_CASE_REGISTER(test_limits<double, int>);
 TEST_CASE_REGISTER(test_limits<double, migraphx::half>);
 TEST_CASE_REGISTER(test_limits<float, int>);
 TEST_CASE_REGISTER(test_limits<int, migraphx::half>);
+#ifndef _WIN32
+// On Windows, types int and long have the same min and max values.
 TEST_CASE_REGISTER(test_limits<long, int>);
+#endif
 TEST_CASE_REGISTER(test_limits<long, char>);

 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/fuse_pointwise.cpp

@@ -21,8 +21,9 @@
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
-#include <migraphx/dead_code_elimination.hpp>
 #include <migraphx/fuse_pointwise.hpp>
+#include <migraphx/dead_code_elimination.hpp>
+#include <migraphx/eliminate_contiguous.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/program.hpp>

@@ -361,4 +362,154 @@ TEST_CASE(no_input)
    EXPECT(p == p2);
}

TEST_CASE(add_reshape_add)
{
    migraphx::shape s1{migraphx::shape::float_type, {3, 10, 16}};
    migraphx::shape s2{migraphx::shape::float_type, {3, 40, 2, 2}};
    migraphx::shape s3{migraphx::shape::float_type, {3, 10, 4, 2, 2}};
    migraphx::program p1;
    {
        auto* mm  = p1.get_main_module();
        auto x    = mm->add_parameter("x", s1);
        auto y    = mm->add_parameter("y", s1);
        auto z    = mm->add_parameter("z", s2);
        auto add1 = mm->add_instruction(migraphx::make_op("add"), x, y);
        auto reshape =
            mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), add1);
        auto add2 = mm->add_instruction(migraphx::make_op("add"), reshape, z);
        mm->add_return({add2});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s1);
        auto y   = mm->add_parameter("y", s1);
        auto z   = mm->add_parameter("z", s2);
        auto x2  = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), x);
        auto y2  = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), y);
        auto z2  = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), z);
        auto fadd =
            add_pointwise(p2, "main:pointwise0", {x2, y2, z2}, [=](auto* pm, const auto& inputs) {
                auto add1 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]);
                return pm->add_instruction(migraphx::make_op("add"), add1, inputs[2]);
            });
        auto reshape =
            mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), fadd);
        mm->add_return({reshape});
    }
    EXPECT(p1.sort() == p2.sort());
}

TEST_CASE(add_reshape_add_nonstandard)
{
    migraphx::shape s1 =
        migraphx::shape::from_permutation(migraphx::shape::float_type, {3, 10, 16}, {2, 0, 1});
    migraphx::shape s2{migraphx::shape::float_type, {3, 40, 2, 2}};
    migraphx::shape s3{migraphx::shape::float_type, {3, 10, 4, 2, 2}};
    migraphx::program p1;
    {
        auto* mm  = p1.get_main_module();
        auto x    = mm->add_parameter("x", s1);
        auto y    = mm->add_parameter("y", s1);
        auto z    = mm->add_parameter("z", s2);
        auto add1 = mm->add_instruction(migraphx::make_op("add"), x, y);
        auto c    = mm->add_instruction(migraphx::make_op("contiguous"), add1);
        auto reshape =
            mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), c);
        auto add2 = mm->add_instruction(migraphx::make_op("add"), reshape, z);
        mm->add_return({add2});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s1);
        auto y   = mm->add_parameter("y", s1);
        auto z   = mm->add_parameter("z", s2);
        auto cx  = mm->add_instruction(migraphx::make_op("contiguous"), x);
        auto cy  = mm->add_instruction(migraphx::make_op("contiguous"), y);
        auto x2  = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), cx);
        auto y2  = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), cy);
        auto z2  = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), z);
        auto fadd =
            add_pointwise(p2, "main:pointwise0", {x2, y2, z2}, [=](auto* pm, const auto& inputs) {
                auto add1 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]);
                return pm->add_instruction(migraphx::make_op("add"), add1, inputs[2]);
            });
        auto reshape =
            mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), fadd);
        mm->add_return({reshape});
    }
    EXPECT(p1.sort() == p2.sort());
}

TEST_CASE(add_unsqueeze_add_nonstandard)
{
    migraphx::shape s1 =
        migraphx::shape::from_permutation(migraphx::shape::float_type, {3, 10, 16}, {2, 0, 1});
    migraphx::shape s2{migraphx::shape::float_type, {3, 10, 1, 16}};
    migraphx::program p1;
    {
        auto* mm  = p1.get_main_module();
        auto x    = mm->add_parameter("x", s1);
        auto y    = mm->add_parameter("y", s1);
        auto z    = mm->add_parameter("z", s2);
        auto add1 = mm->add_instruction(migraphx::make_op("add"), x, y);
        auto unsqueeze =
            mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {2}}}), add1);
        auto add2 = mm->add_instruction(migraphx::make_op("add"), unsqueeze, z);
        mm->add_return({add2});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s1);
        auto y   = mm->add_parameter("y", s1);
        auto z   = mm->add_parameter("z", s2);
        auto cx  = mm->add_instruction(migraphx::make_op("contiguous"), x);
        auto cy  = mm->add_instruction(migraphx::make_op("contiguous"), y);
        auto x2  = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), cx);
        auto y2  = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), cy);
        auto fadd =
            add_pointwise(p2, "main:pointwise0", {x2, y2, z}, [=](auto* pm, const auto& inputs) {
                auto add1 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]);
                return pm->add_instruction(migraphx::make_op("add"), add1, inputs[2]);
            });
        mm->add_return({fadd});
    }
    EXPECT(p1.sort() == p2.sort());
}

TEST_CASE(add_reshape_add_error)
{
    migraphx::shape s1{migraphx::shape::float_type, {6, 35}};
    migraphx::shape s2{migraphx::shape::float_type, {3, 7, 2, 5}};
    migraphx::program p1;
    {
        auto* mm  = p1.get_main_module();
        auto x    = mm->add_parameter("x", s1);
        auto y    = mm->add_parameter("y", s1);
        auto z    = mm->add_parameter("z", s2);
        auto add1 = mm->add_instruction(migraphx::make_op("add"), x, y);
        auto reshape =
            mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), add1);
        auto add2 = mm->add_instruction(migraphx::make_op("add"), reshape, z);
        mm->add_return({add2});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm   = p2.get_main_module();
        auto x     = mm->add_parameter("x", s1);
        auto y     = mm->add_parameter("y", s1);
        auto z     = mm->add_parameter("z", s2);
        auto fadd1 = add_pointwise(p2, "main:pointwise0", {x, y}, single_pointwise("add"));
        auto reshape =
            mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), fadd1);
        auto fadd2 =
            add_pointwise(p2, "main:pointwise1", {reshape, z}, single_pointwise("add"));
        mm->add_return({fadd2});
    }
    EXPECT(p1.sort() == p2.sort());
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
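Semantically, the fused "main:pointwise0" module built in these expected programs evaluates (x + y) + z in a single elementwise pass, instead of two separate add kernels with a reshape in between. A hedged sketch of that behavior, with plain std::vector standing in for MIGraphX arguments; fused_add3 is illustrative only:

#include <cassert>
#include <cstddef>
#include <vector>

// Compute (x + y) + z per element in one traversal, the way the fused pointwise
// module does after the pass rewrites the add/reshape/add chain.
std::vector<float> fused_add3(const std::vector<float>& x,
                              const std::vector<float>& y,
                              const std::vector<float>& z)
{
    assert(x.size() == y.size() and y.size() == z.size());
    std::vector<float> out(x.size());
    for(std::size_t i = 0; i < out.size(); i++)
        out[i] = (x[i] + y[i]) + z[i]; // both adds happen in one pass
    return out;
}

int main()
{
    auto r = fused_add3({1, 2}, {3, 4}, {5, 6});
    assert(r[0] == 9 and r[1] == 12);
}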
test/gpu/jit.cpp

@@ -218,6 +218,15 @@ TEST_CASE(compile_warnings)
 #endif
 }

+TEST_CASE(has_flags)
+{
+    EXPECT(migraphx::gpu::hip_has_flags({"--std=c++17"}));
+    EXPECT(not migraphx::gpu::hip_has_flags({"--non-existent-flag-to-test-in-migraphx"}));
+    EXPECT(migraphx::gpu::hip_has_flags({"-Wunused-parameter"}));
+    EXPECT(not migraphx::gpu::hip_has_flags(
+        {"-Wnon-existent-warnings-flag-to-test-in-migraphx", "-Werror"}));
+}

 TEST_CASE(code_object_hip)
 {
     auto binaries = migraphx::gpu::compile_hip_src(
test/gpu/mlir.cpp

@@ -140,7 +140,7 @@ TEST_CASE(conv)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_convolution(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_convolution(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     return %0 : tensor<1x2x2x2xf32>
   }

@@ -163,7 +163,7 @@ TEST_CASE(conv_add_relu)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_convolution(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_convolution_add_relu(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     %1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
     %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>

@@ -191,7 +191,7 @@ TEST_CASE(quant_dot_add)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @main(%arg0: tensor<1x5x4xi8>, %arg1: tensor<1x4x3xi8>, %arg2: tensor<1x5x3xi32>) -> tensor<1x5x3xi32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_quant_dot_add(%arg0: tensor<1x5x4xi8>, %arg1: tensor<1x4x3xi8>, %arg2: tensor<1x5x3xi32>) -> tensor<1x5x3xi32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.quant_dot(%arg0, %arg1) : (tensor<1x5x4xi8>, tensor<1x4x3xi8>) -> tensor<1x5x3xi32>
     %1 = migraphx.add(%0, %arg2) : (tensor<1x5x3xi32>, tensor<1x5x3xi32>) -> tensor<1x5x3xi32>
     return %1 : tensor<1x5x3xi32>

@@ -218,7 +218,7 @@ TEST_CASE(dot_add)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_dot_add(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
     %1 = migraphx.add(%0, %arg2) : (tensor<1x5x3xf32>, tensor<1x5x3xf32>) -> tensor<1x5x3xf32>
     return %1 : tensor<1x5x3xf32>

@@ -244,7 +244,7 @@ TEST_CASE(conv_int8_dequantize_quantize)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @main(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_quant_convolution_dequantizelinear_quantizelinear(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.quant_convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xi8>, tensor<2x8x3x3xi8>) -> tensor<1x2x2x2xi32>
     %1 = migraphx.dequantizelinear(%0, %arg2, %arg3) : (tensor<1x2x2x2xi32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xf32>
     %2 = migraphx.quantizelinear(%1, %arg2, %arg3) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32>

@@ -277,7 +277,7 @@ TEST_CASE(dot_convert)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>) -> tensor<1x5x3xf16> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_dot_convert(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>) -> tensor<1x5x3xf16> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
     %1 = migraphx.convert(%0) {target_type = 1 : i64} : (tensor<1x5x3xf32>) -> tensor<1x5x3xf16>
     return %1 : tensor<1x5x3xf16>

@@ -303,7 +303,7 @@ TEST_CASE(dot_where)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xi8>, %arg3: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_dot_where(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xi8>, %arg3: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
     %1 = migraphx.where(%arg2, %0, %arg3) : (tensor<1x5x3xi8>, tensor<1x5x3xf32>, tensor<1x5x3xf32>) -> tensor<1x5x3xf32>
     return %1 : tensor<1x5x3xf32>
test/gpu/quantization.cpp

@@ -24,7 +24,7 @@
 #include <iostream>
 #include <vector>
 #include <migraphx/gpu/fuse_mlir.hpp>
-#include <migraphx/operators.hpp>
+#include <migraphx/make_op.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/quantization.hpp>
 #include <migraphx/generate.hpp>

@@ -90,7 +90,7 @@ TEST_CASE(int8_quantization)
     migraphx::shape sc{migraphx::shape::float_type, {5, 8}};
     auto pa = mm->add_parameter("a", sa);
     auto pb = mm->add_parameter("b", sb);
-    mm->add_instruction(migraphx::op::dot{}, pa, pb);
+    mm->add_instruction(migraphx::make_op("dot"), pa, pb);
     return p;
 };
test/include/test.hpp

@@ -22,6 +22,7 @@
 * THE SOFTWARE.
 */
+#include <atomic>
 #include <algorithm>
 #include <cassert>
 #include <cstdio>

@@ -342,11 +343,19 @@ inline std::ostream& operator<<(std::ostream& os, const color& c)
     return os;
 }

+inline std::atomic<int>& failures()
+{
+    // NOLINTNEXTLINE
+    static std::atomic<int> f = 0;
+    return f;
+}
+
 template <class T, class F>
 void failed(T x, const char* msg, const char* func, const char* file, int line, F f)
 {
     if(not bool(x.value()))
     {
+        failures()++;
         std::cout << func << std::endl;
         std::cout << file << ":" << line << ":" << std::endl;
         std::cout << color::bold << color::fg_red << " FAILED: " << color::reset << msg << " "

@@ -586,13 +595,21 @@ struct driver
         {
             try
             {
+                failures() = 0;
                 f();
             }
             // cppcheck-suppress EmptyCatchStatement
             catch(const failure_error&)
             {
                 msg = "Test failure";
             }
         }
+        if(msg.empty() and failures() != 0)
+        {
+            if(failures() == 1)
+                msg = "Test failure";
+            else
+                msg = std::to_string(failures()) + " test failures";
+        }
         if(msg.empty())
         {
             out() << color::fg_green << "[ COMPLETE ] " << color::reset << color::bold << name

@@ -683,10 +700,10 @@ inline void run(int argc, const char* argv[])
 #define TEST_CAPTURE(...) test::capture{}->*__VA_ARGS__
 // NOLINTNEXTLINE
-#define CHECK(...)                                                                                 \
-    test::failed(                                                                                  \
-        test::capture{}->*__VA_ARGS__, #__VA_ARGS__, __PRETTY_FUNCTION__, __FILE__, __LINE__, [] { \
-        })
+#define CHECK(...)                                                                            \
+    test::failed(                                                                             \
+        TEST_CAPTURE(__VA_ARGS__), #__VA_ARGS__, __PRETTY_FUNCTION__, __FILE__, __LINE__, [] { \
+        })
 // NOLINTNEXTLINE
 #define EXPECT(...)                                     \
     test::failed(TEST_CAPTURE(__VA_ARGS__),             \
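The new failures() counter is a function-local static std::atomic<int>, so every CHECK/EXPECT failure can bump one lazily-initialized, thread-safe counter and the driver can reset and summarize it per test case. A standalone sketch of that pattern (illustrative, not the MIGraphX test header itself):

#include <atomic>
#include <iostream>

// Function-local static: initialized once, shared by all callers, safe to
// increment from multiple threads.
std::atomic<int>& failures()
{
    static std::atomic<int> f = 0;
    return f;
}

void record_failure(const char* msg)
{
    failures()++;
    std::cout << "FAILED: " << msg << "\n";
}

int main()
{
    failures() = 0; // the driver resets the counter before running a test case
    record_failure("1 == 2");
    record_failure("x < y");
    if(failures() != 0)
        std::cout << failures() << " test failures\n"; // summary printed after the case
}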
test/inline_module_test.cpp

@@ -26,7 +26,6 @@
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/instruction.hpp>
 #include <basic_ops.hpp>
-#include <migraphx/operators.hpp>
 #include <migraphx/make_op.hpp>
 #include <test.hpp>
test/insert_pad_test.cpp

@@ -26,8 +26,8 @@
 #include <migraphx/insert_pad.hpp>
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/instruction.hpp>
+#include <migraphx/op/common.hpp>
 #include <basic_ops.hpp>
-#include <migraphx/operators.hpp>
 #include <migraphx/make_op.hpp>
 #include <test.hpp>

@@ -58,10 +58,11 @@ create_conv(migraphx::instruction_ref& l_img,
     migraphx::shape s_weights{migraphx::shape::int32_type, {4, channels, 3, 3}};
     std::vector<int32_t> weights(4 * channels * 3 * 3);
     auto l_weights = m.add_literal(migraphx::literal{s_weights, weights});
-    migraphx::op::convolution op;
-    op.padding_mode = padding_mode;
-    op.padding      = {0, 0, 1, 1};
-    return m.add_instruction(op, l_img, l_weights);
+    return m.add_instruction(
+        migraphx::make_op("convolution",
+                          {{"padding_mode", padding_mode}, {"padding", {0, 0, 1, 1}}}),
+        l_img,
+        l_weights);
 }

 TEST_CASE(rewrite_pad)