gaoqiong / composable_kernel · Commits

Commit 5c75ff5c (unverified)
Authored Sep 26, 2023 by zjing14 · committed via GitHub on Sep 26, 2023
Parents: 96417775, 48ba6e8a

    Merge branch 'develop' into ab_blk_copy_multi_d
Changes: 181 · This page shows 20 changed files with 269 additions and 381 deletions (+269 −381).
test/batched_gemm_gemm/CMakeLists.txt                        +4    −4
test/batched_gemm_multi_d/CMakeLists.txt                     +2    −2
test/batched_gemm_multi_d/test_batched_gemm_multi_d_dl.cpp   +0    −0
test/batched_gemm_reduce/CMakeLists.txt                      +3    −4
test/batched_gemm_softmax_gemm/CMakeLists.txt                +7    −7
test/batched_gemm_softmax_gemm_permute/CMakeLists.txt        +22   −19
test/batchnorm/batchnorm_bwd_rank_4.cpp                      +17   −4
test/batchnorm/batchnorm_fwd_rank_4.cpp                      +17   −4
test/batchnorm/batchnorm_infer_rank_4.cpp                    +17   −4
test/data_type/CMakeLists.txt                                +8    −7
test/data_type/fp8.cpp                                       +0    −0
test/elementwise_normalization/CMakeLists.txt                +3    −3
test/gemm/CMakeLists.txt                                     +14   −16
test/gemm_layernorm/CMakeLists.txt                           +5    −5
test/gemm_reduce/CMakeLists.txt                              +3    −4
test/grouped_convnd_fwd/CMakeLists.txt                       +1    −1
test/grouped_convnd_fwd/grouped_convnd_fwd.cpp               +0    −284
test/grouped_convnd_fwd/test_grouped_convnd_fwd.cpp          +133  −0
test/grouped_gemm/CMakeLists.txt                             +0    −2
test/normalization/CMakeLists.txt                            +13   −11
test/batched_gemm_gemm/CMakeLists.txt · view file @ 5c75ff5c

```diff
@@ -2,12 +2,12 @@ list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
 set(target 0)
 foreach(gpu IN LISTS GPU_TARGETS)
     if(gpu IN_LIST gpu_list AND target EQUAL 0)
         if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
             add_custom_target(test_batched_gemm_gemm)
             add_gtest_executable(test_batched_gemm_gemm_fp16 test_batched_gemm_gemm_fp16.cpp)
-            target_link_libraries(test_batched_gemm_gemm_fp16 PRIVATE utility device_batched_gemm_gemm_instance)
-            add_dependencies(test_batched_gemm_gemm test_batched_gemm_gemm_fp16)
-            set(target 1)
+            if(result EQUAL 0)
+                target_link_libraries(test_batched_gemm_gemm_fp16 PRIVATE utility device_batched_gemm_gemm_instance)
+                add_dependencies(test_batched_gemm_gemm test_batched_gemm_gemm_fp16)
+                set(target 1)
+            endif()
         endif()
     endif()
 endforeach()
\ No newline at end of file
```
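Common to this file and most of the CMakeLists diffs below: `add_gtest_executable` / `add_test_executable` now appear to report, through a `result` variable, whether the test executable was actually created for the configured `DTYPES` and `GPU_TARGETS`, and the follow-up `target_link_libraries` / `add_dependencies` / `set(target 1)` calls are wrapped in `if(result EQUAL 0)` so that configuration no longer fails for tests that were skipped. The exact contract of `result` is inferred from these hunks; the helper's definition is not part of this commit.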
test/batched_gemm_multi_d/CMakeLists.txt · view file @ 5c75ff5c

```diff
 if(DL_KERNELS)
-    add_gtest_executable(test_batched_gemm_multi_d test_batched_gemm_multi_d.cpp)
-    target_link_libraries(test_batched_gemm_multi_d PRIVATE utility device_batched_gemm_multi_d_instance)
+    add_gtest_executable(test_batched_gemm_multi_d test_batched_gemm_multi_d_dl.cpp)
+    if(result EQUAL 0)
+        target_link_libraries(test_batched_gemm_multi_d PRIVATE utility device_batched_gemm_multi_d_instance)
+    endif()
 endif()
```
test/batched_gemm_multi_d/test_batched_gemm_multi_d.cpp → test/batched_gemm_multi_d/test_batched_gemm_multi_d_dl.cpp · view file @ 5c75ff5c · file moved
test/batched_gemm_reduce/CMakeLists.txt · view file @ 5c75ff5c

```diff
@@ -2,10 +2,9 @@ list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
 set(target 0)
 foreach(gpu IN LISTS GPU_TARGETS)
     if(gpu IN_LIST gpu_list AND target EQUAL 0)
         if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
             add_test_executable(test_batched_gemm_reduce_fp16 batched_gemm_reduce_fp16.cpp)
-            target_link_libraries(test_batched_gemm_reduce_fp16 PRIVATE utility)
-            target_link_libraries(test_batched_gemm_reduce_fp16 PRIVATE device_batched_gemm_reduce_instance)
+            if(result EQUAL 0)
+                target_link_libraries(test_batched_gemm_reduce_fp16 PRIVATE utility device_batched_gemm_reduce_instance)
+            endif()
             set(target 1)
         endif()
     endif()
```
test/batched_gemm_softmax_gemm/CMakeLists.txt · view file @ 5c75ff5c

```diff
@@ -2,12 +2,12 @@ list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
 set(target 0)
 foreach(gpu IN LISTS GPU_TARGETS)
     if(gpu IN_LIST gpu_list AND target EQUAL 0)
         if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
-            add_custom_target(test_batched_gemm_softmax_gemm)
-            add_gtest_executable(test_batched_gemm_softmax_gemm_fp16 test_batched_gemm_softmax_gemm_fp16.cpp)
-            target_link_libraries(test_batched_gemm_softmax_gemm_fp16 PRIVATE utility device_batched_gemm_softmax_gemm_instance)
-            add_dependencies(test_batched_gemm_softmax_gemm test_batched_gemm_softmax_gemm_fp16)
-            set(target 1)
+            add_custom_target(test_batched_gemm_softmax_gemm)
+            add_gtest_executable(test_batched_gemm_softmax_gemm_fp16 test_batched_gemm_softmax_gemm_fp16.cpp)
+            if(result EQUAL 0)
+                target_link_libraries(test_batched_gemm_softmax_gemm_fp16 PRIVATE utility device_batched_gemm_softmax_gemm_instance)
+                add_dependencies(test_batched_gemm_softmax_gemm test_batched_gemm_softmax_gemm_fp16)
+                set(target 1)
+            endif()
         endif()
     endif()
 endforeach()
\ No newline at end of file
```
test/batched_gemm_softmax_gemm_permute/CMakeLists.txt · view file @ 5c75ff5c

```diff
@@ -2,25 +2,28 @@ list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
 set(target 0)
 foreach(gpu IN LISTS GPU_TARGETS)
     if(gpu IN_LIST gpu_list AND target EQUAL 0)
-        if(DTYPES MATCHES "fp16" OR DTYPES MATCHES "bf16" OR NOT DEFINED DTYPES)
-            add_custom_target(test_batched_gemm_softmax_gemm_permute)
-        endif()
-        if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
-            add_gtest_executable(test_batched_gemm_softmax_gemm_permute_fp16 test_batched_gemm_softmax_gemm_permute_fp16.cpp)
-            add_gtest_executable(test_batched_gemm_bias_softmax_gemm_permute_fp16 test_batched_gemm_bias_softmax_gemm_permute_fp16.cpp)
-            target_link_libraries(test_batched_gemm_softmax_gemm_permute_fp16 PRIVATE utility device_batched_gemm_softmax_gemm_permute_instance)
-            target_link_libraries(test_batched_gemm_bias_softmax_gemm_permute_fp16 PRIVATE utility device_batched_gemm_softmax_gemm_permute_instance)
-            add_dependencies(test_batched_gemm_softmax_gemm_permute test_batched_gemm_softmax_gemm_permute_fp16)
-            add_dependencies(test_batched_gemm_softmax_gemm_permute test_batched_gemm_bias_softmax_gemm_permute_fp16)
-        endif()
-        if(DTYPES MATCHES "bf16" OR NOT DEFINED DTYPES)
-            add_gtest_executable(test_batched_gemm_softmax_gemm_permute_bf16 test_batched_gemm_softmax_gemm_permute_bf16.cpp)
-            add_gtest_executable(test_batched_gemm_bias_softmax_gemm_permute_bf16 test_batched_gemm_bias_softmax_gemm_permute_bf16.cpp)
-            target_link_libraries(test_batched_gemm_softmax_gemm_permute_bf16 PRIVATE utility device_batched_gemm_softmax_gemm_permute_instance)
-            target_link_libraries(test_batched_gemm_bias_softmax_gemm_permute_bf16 PRIVATE utility device_batched_gemm_softmax_gemm_permute_instance)
-            add_dependencies(test_batched_gemm_softmax_gemm_permute test_batched_gemm_softmax_gemm_permute_bf16)
-            add_dependencies(test_batched_gemm_softmax_gemm_permute test_batched_gemm_bias_softmax_gemm_permute_bf16)
-        endif()
+        add_custom_target(test_batched_gemm_softmax_gemm_permute)
+        add_gtest_executable(test_batched_gemm_softmax_gemm_permute_fp16 test_batched_gemm_softmax_gemm_permute_fp16.cpp)
+        if(result EQUAL 0)
+            target_link_libraries(test_batched_gemm_softmax_gemm_permute_fp16 PRIVATE utility device_batched_gemm_softmax_gemm_permute_instance)
+            add_dependencies(test_batched_gemm_softmax_gemm_permute test_batched_gemm_softmax_gemm_permute_fp16)
+        endif()
+        add_gtest_executable(test_batched_gemm_bias_softmax_gemm_permute_fp16 test_batched_gemm_bias_softmax_gemm_permute_fp16.cpp)
+        if(result EQUAL 0)
+            target_link_libraries(test_batched_gemm_bias_softmax_gemm_permute_fp16 PRIVATE utility device_batched_gemm_softmax_gemm_permute_instance)
+            add_dependencies(test_batched_gemm_softmax_gemm_permute test_batched_gemm_bias_softmax_gemm_permute_fp16)
+        endif()
+        add_gtest_executable(test_batched_gemm_softmax_gemm_permute_bf16 test_batched_gemm_softmax_gemm_permute_bf16.cpp)
+        if(result EQUAL 0)
+            target_link_libraries(test_batched_gemm_softmax_gemm_permute_bf16 PRIVATE utility device_batched_gemm_softmax_gemm_permute_instance)
+            add_dependencies(test_batched_gemm_softmax_gemm_permute test_batched_gemm_softmax_gemm_permute_bf16)
+        endif()
+        add_gtest_executable(test_batched_gemm_bias_softmax_gemm_permute_bf16 test_batched_gemm_bias_softmax_gemm_permute_bf16.cpp)
+        if(result EQUAL 0)
+            target_link_libraries(test_batched_gemm_bias_softmax_gemm_permute_bf16 PRIVATE utility device_batched_gemm_softmax_gemm_permute_instance)
+            add_dependencies(test_batched_gemm_softmax_gemm_permute test_batched_gemm_bias_softmax_gemm_permute_bf16)
+        endif()
+        set(target 1)
     endif()
 endforeach()
\ No newline at end of file
```
test/batchnorm/batchnorm_bwd_rank_4.cpp · view file @ 5c75ff5c

```diff
@@ -70,10 +70,23 @@ class TestBatchNormBwdRank4 : public ::testing::Test
     }
 };
 
-using KernelTypes = ::testing::Types<std::tuple<F16, F32, F32, F32, F16, F32, F32>,
-                                     std::tuple<F32, F32, F32, F32, F32, F32, F32>,
-                                     std::tuple<BF16, F32, F32, F32, BF16, F32, F32>,
-                                     std::tuple<F64, F64, F64, F64, F64, F64, F64>>;
+using KernelTypes = ::testing::Types<
+#ifdef CK_ENABLE_FP16
+    std::tuple<F16, F32, F32, F32, F16, F32, F32>
+#endif
+#ifdef CK_ENABLE_FP32
+    ,
+    std::tuple<F32, F32, F32, F32, F32, F32, F32>
+#endif
+#ifdef CK_ENABLE_BF16
+    ,
+    std::tuple<BF16, F32, F32, F32, BF16, F32, F32>
+#endif
+#ifdef CK_ENABLE_FP64
+    ,
+    std::tuple<F64, F64, F64, F64, F64, F64, F64>
+#endif
+    >;
 
 TYPED_TEST_SUITE(TestBatchNormBwdRank4, KernelTypes);
```
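All three batchnorm tests move from a fixed four-entry type list to one assembled under `CK_ENABLE_*` guards, so the typed suite only instantiates the data types the library was built with. A minimal standalone sketch of the pattern (placeholder fixture, macros, and element types, not CK code; it assumes the first guard is enabled so the comma-separated list never begins with a dangling comma):

```cpp
#include <gtest/gtest.h>
#include <tuple>

template <typename Tuple>
class TestNorm : public ::testing::Test
{
};

// Entries are compiled in only when the corresponding feature macro is
// defined, mirroring the CK_ENABLE_FP16/FP32/BF16/FP64 guards above.
// ENABLE_FLOAT / ENABLE_DOUBLE are hypothetical stand-ins; build with
// e.g. -DENABLE_FLOAT for the list to be non-empty.
using KernelTypes = ::testing::Types<
#ifdef ENABLE_FLOAT
    std::tuple<float, float>
#endif
#ifdef ENABLE_DOUBLE
    ,
    std::tuple<double, double>
#endif
    >;

TYPED_TEST_SUITE(TestNorm, KernelTypes);

TYPED_TEST(TestNorm, InstantiatesOnlyEnabledTypes)
{
    using Elem = std::tuple_element_t<0, TypeParam>;
    SUCCEED() << "instantiated for a " << sizeof(Elem) << "-byte element type";
}
```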
test/batchnorm/batchnorm_fwd_rank_4.cpp · view file @ 5c75ff5c

```diff
@@ -87,10 +87,23 @@ class TestBatchNormFwdRank4 : public ::testing::Test
     }
 };
 
-using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, F16, F16, F32>,
-                                     std::tuple<F32, F32, F32, F32, F32, F32>,
-                                     std::tuple<BF16, BF16, F32, BF16, BF16, F32>,
-                                     std::tuple<F64, F64, F64, F64, F64, F64>>;
+using KernelTypes = ::testing::Types<
+#ifdef CK_ENABLE_FP16
+    std::tuple<F16, F16, F32, F16, F16, F32>
+#endif
+#ifdef CK_ENABLE_FP32
+    ,
+    std::tuple<F32, F32, F32, F32, F32, F32>
+#endif
+#ifdef CK_ENABLE_BF16
+    ,
+    std::tuple<BF16, BF16, F32, BF16, BF16, F32>
+#endif
+#ifdef CK_ENABLE_FP64
+    ,
+    std::tuple<F64, F64, F64, F64, F64, F64>
+#endif
+    >;
 
 TYPED_TEST_SUITE(TestBatchNormFwdRank4, KernelTypes);
```
test/batchnorm/batchnorm_infer_rank_4.cpp · view file @ 5c75ff5c

```diff
@@ -67,10 +67,23 @@ class TestBatchNormInferRank4 : public ::testing::Test
     }
 };
 
-using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, F16, F16, F32>,
-                                     std::tuple<F32, F32, F32, F32, F32, F32>,
-                                     std::tuple<BF16, BF16, F32, BF16, BF16, F32>,
-                                     std::tuple<F64, F64, F64, F64, F64, F64>>;
+using KernelTypes = ::testing::Types<
+#ifdef CK_ENABLE_FP16
+    std::tuple<F16, F16, F32, F16, F16, F32>
+#endif
+#ifdef CK_ENABLE_FP32
+    ,
+    std::tuple<F32, F32, F32, F32, F32, F32>
+#endif
+#ifdef CK_ENABLE_BF16
+    ,
+    std::tuple<BF16, BF16, F32, BF16, BF16, F32>
+#endif
+#ifdef CK_ENABLE_FP64
+    ,
+    std::tuple<F64, F64, F64, F64, F64, F64>
+#endif
+    >;
 
 TYPED_TEST_SUITE(TestBatchNormInferRank4, KernelTypes);
```
test/data_type/CMakeLists.txt · view file @ 5c75ff5c

```diff
 if(USE_BITINT_EXTENSION_INT4)
     add_gtest_executable(test_int4 int4.cpp)
-    target_link_libraries(test_int4 PRIVATE utility)
+    if(result EQUAL 0)
+        target_link_libraries(test_int4 PRIVATE utility)
+    endif()
 endif()
 if(DTYPES MATCHES "fp8" OR NOT DEFINED DTYPES)
-    add_gtest_executable(test_f8 f8.cpp)
-    target_link_libraries(test_f8 PRIVATE utility)
+    add_gtest_executable(test_fp8 fp8.cpp)
+    if(result EQUAL 0)
+        target_link_libraries(test_fp8 PRIVATE utility)
+    endif()
 endif()
 if(DTYPES MATCHES "bf8" OR NOT DEFINED DTYPES)
     add_gtest_executable(test_bf8 bf8.cpp)
-    target_link_libraries(test_bf8 PRIVATE utility)
+    if(result EQUAL 0)
+        target_link_libraries(test_bf8 PRIVATE utility)
+    endif()
 endif()
```
test/data_type/f8.cpp → test/data_type/fp8.cpp · view file @ 5c75ff5c · file moved
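The f8 → fp8 rename keeps the executable target (`test_fp8`), its source file (`fp8.cpp`), and the `DTYPES` token `"fp8"` spelled consistently across the build.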
test/elementwise_normalization/CMakeLists.txt · view file @ 5c75ff5c

```diff
 if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
     add_custom_target(test_elementwise_normalization)
     add_gtest_executable(test_elementwise_layernorm_fp16 test_elementwise_layernorm_fp16.cpp)
-    target_link_libraries(test_elementwise_layernorm_fp16 PRIVATE utility device_elementwise_normalization_instance)
-    add_dependencies(test_elementwise_normalization test_elementwise_layernorm_fp16)
+    if(result EQUAL 0)
+        target_link_libraries(test_elementwise_layernorm_fp16 PRIVATE utility device_elementwise_normalization_instance)
+        add_dependencies(test_elementwise_normalization test_elementwise_layernorm_fp16)
+    endif()
 endif()
\ No newline at end of file
```
test/gemm/CMakeLists.txt · view file @ 5c75ff5c

```diff
 if(DTYPES MATCHES "fp32" OR NOT DEFINED DTYPES)
     add_test_executable(test_gemm_fp32 gemm_fp32.cpp)
-    target_link_libraries(test_gemm_fp32 PRIVATE utility)
-    target_link_libraries(test_gemm_fp32 PRIVATE device_gemm_instance)
+    if(result EQUAL 0)
+        target_link_libraries(test_gemm_fp32 PRIVATE utility device_gemm_instance)
+    endif()
 endif()
 if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
     add_test_executable(test_gemm_fp16 gemm_fp16.cpp)
-    target_link_libraries(test_gemm_fp16 PRIVATE utility)
-    target_link_libraries(test_gemm_fp16 PRIVATE device_gemm_instance)
-    add_library(gemm_standalone_xdl_fp16_instances STATIC
+    if(result EQUAL 0)
+        target_link_libraries(test_gemm_fp16 PRIVATE utility device_gemm_instance)
+        add_library(gemm_standalone_xdl_fp16_instances STATIC
             instance/gemm_f16_nn_instance.cpp
             instance/gemm_f16_nt_instance.cpp
             instance/gemm_f16_tn_instance.cpp
             instance/gemm_wavelet_f16_tn_instance.cpp
             instance/gemm_f16_tt_instance.cpp
-    )
+        )
+    endif()
     add_test_executable(test_gemm_standalone_xdl_fp16 gemm_standalone_xdl_fp16.cpp)
-    target_link_libraries(test_gemm_standalone_xdl_fp16 PRIVATE gemm_standalone_xdl_fp16_instances utility)
-    target_include_directories(test_gemm_standalone_xdl_fp16 PRIVATE instance/)
+    if(result EQUAL 0)
+        target_link_libraries(test_gemm_standalone_xdl_fp16 PRIVATE gemm_standalone_xdl_fp16_instances utility)
+        target_include_directories(test_gemm_standalone_xdl_fp16 PRIVATE instance/)
+    endif()
 endif()
 if(DTYPES MATCHES "bf16" OR NOT DEFINED DTYPES)
     add_test_executable(test_gemm_bf16 gemm_bf16.cpp)
-    target_link_libraries(test_gemm_bf16 PRIVATE utility)
-    target_link_libraries(test_gemm_bf16 PRIVATE device_gemm_instance)
+    if(result EQUAL 0)
+        target_link_libraries(test_gemm_bf16 PRIVATE utility device_gemm_instance)
+    endif()
 endif()
 if(DTYPES MATCHES "int8" OR NOT DEFINED DTYPES)
     add_test_executable(test_gemm_int8 gemm_int8.cpp)
-    target_link_libraries(test_gemm_int8 PRIVATE utility)
-    target_link_libraries(test_gemm_int8 PRIVATE device_gemm_instance)
+    if(result EQUAL 0)
+        target_link_libraries(test_gemm_int8 PRIVATE utility device_gemm_instance)
+    endif()
 endif()
\ No newline at end of file
```
test/gemm_layernorm/CMakeLists.txt · view file @ 5c75ff5c

```diff
@@ -2,12 +2,12 @@ list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
 set(target 0)
 foreach(gpu IN LISTS GPU_TARGETS)
     if(gpu IN_LIST gpu_list AND target EQUAL 0)
         if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
             add_custom_target(test_gemm_layernorm)
             add_gtest_executable(test_gemm_add_relu_add_layernorm_fp16 test_gemm_add_relu_add_layernorm_fp16.cpp)
-            target_link_libraries(test_gemm_add_relu_add_layernorm_fp16 PRIVATE utility device_gemm_add_relu_add_layernorm_instance)
-            add_dependencies(test_gemm_layernorm test_gemm_add_relu_add_layernorm_fp16)
-            set(target 1)
+            if(result EQUAL 0)
+                target_link_libraries(test_gemm_add_relu_add_layernorm_fp16 PRIVATE utility device_gemm_add_relu_add_layernorm_instance)
+                add_dependencies(test_gemm_layernorm test_gemm_add_relu_add_layernorm_fp16)
+                set(target 1)
+            endif()
         endif()
     endif()
 endforeach()
```
test/gemm_reduce/CMakeLists.txt · view file @ 5c75ff5c

```diff
 if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
     add_test_executable(test_gemm_reduce_fp16 gemm_reduce_fp16.cpp)
-    target_link_libraries(test_gemm_reduce_fp16 PRIVATE utility)
-    target_link_libraries(test_gemm_reduce_fp16 PRIVATE device_gemm_reduce_instance)
+    if(result EQUAL 0)
+        target_link_libraries(test_gemm_reduce_fp16 PRIVATE utility device_gemm_reduce_instance)
+    endif()
 endif()
\ No newline at end of file
```
test/grouped_convnd_fwd/CMakeLists.txt · view file @ 5c75ff5c

```diff
-add_gtest_executable(test_grouped_convnd_fwd grouped_convnd_fwd.cpp)
+add_gtest_executable(test_grouped_convnd_fwd test_grouped_convnd_fwd.cpp)
 target_link_libraries(test_grouped_convnd_fwd PRIVATE utility device_grouped_conv1d_fwd_instance device_grouped_conv2d_fwd_instance device_grouped_conv3d_fwd_instance)
```
test/grouped_convnd_fwd/grouped_convnd_fwd.cpp · deleted 100644 → 0 · view file @ 96417775

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <cstdlib>
#include <iostream>
#include <initializer_list>
#include <vector>

#include <gtest/gtest.h>

#include "profiler/profile_grouped_conv_fwd_impl.hpp"

class TestGroupedConvNdFwd : public ::testing::Test
{
    protected:
    std::vector<ck::utils::conv::ConvParam> conv_params;
};

// 1d GNWC/GKXC/GNWK
TEST_F(TestGroupedConvNdFwd, GroupedConv1dFwdGNWC)
{
    conv_params.clear();
    conv_params.push_back({1, 2, 128, 128, 256, {1}, {14}, {2}, {1}, {0}, {0}});
    conv_params.push_back({1, 2, 128, 128, 256, {3}, {28}, {1}, {1}, {1}, {1}});
    conv_params.push_back({1, 2, 128, 128, 256, {1}, {3}, {1}, {1}, {0}, {0}});
    conv_params.push_back({1, 1, 1, 1, 32, {3}, {32}, {1}, {1}, {1}, {1}});
    conv_params.push_back({1, 1, 1, 64, 3, {3}, {32}, {1}, {1}, {1}, {1}});

    for(auto& param : conv_params)
    {
        bool pass;

        // fp32
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            1, ck::tensor_layout::convolution::GNWC, ck::tensor_layout::convolution::GKXC,
            ck::tensor_layout::convolution::GNWK, float, float, float>(
            true, 1, false, false, param); // do_verification, init_method, do_log, time_kernel
        EXPECT_TRUE(pass);

        // fp16
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            1, ck::tensor_layout::convolution::GNWC, ck::tensor_layout::convolution::GKXC,
            ck::tensor_layout::convolution::GNWK, ck::half_t, ck::half_t, ck::half_t>(
            true, 1, false, false, param);
        EXPECT_TRUE(pass);

        // bf16
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            1, ck::tensor_layout::convolution::GNWC, ck::tensor_layout::convolution::GKXC,
            ck::tensor_layout::convolution::GNWK, ck::bhalf_t, ck::bhalf_t, ck::bhalf_t>(
            true, 1, false, false, param);
        EXPECT_TRUE(pass);

        // int8
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            1, ck::tensor_layout::convolution::GNWC, ck::tensor_layout::convolution::GKXC,
            ck::tensor_layout::convolution::GNWK, int8_t, int8_t, int8_t>(
            true, 1, false, false, param);
        EXPECT_TRUE(pass);
    }
}

// 2d GNHWC/GKYXC/GNHWK
TEST_F(TestGroupedConvNdFwd, GroupedConv2dFwdGNHWC)
{
    conv_params.clear();
    conv_params.push_back({2, 2, 128, 128, 256, {1, 1}, {7, 7}, {2, 2}, {1, 1}, {0, 0}, {0, 0}});
    conv_params.push_back({2, 2, 128, 128, 256, {3, 3}, {14, 14}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    conv_params.push_back({2, 2, 128, 128, 256, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}});
    conv_params.push_back({2, 1, 1, 1, 32, {3, 3}, {32, 32}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    conv_params.push_back({2, 1, 1, 64, 3, {3, 3}, {32, 32}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    conv_params.push_back({2, 1, 1, 1, 1, {3, 3}, {32, 32}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});

    for(auto& param : conv_params)
    {
        bool pass;

        // fp32
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            2, ck::tensor_layout::convolution::GNHWC, ck::tensor_layout::convolution::GKYXC,
            ck::tensor_layout::convolution::GNHWK, float, float, float>(
            true, 1, false, false, param); // do_verification, init_method, do_log, time_kernel
        EXPECT_TRUE(pass);

        // fp16
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            2, ck::tensor_layout::convolution::GNHWC, ck::tensor_layout::convolution::GKYXC,
            ck::tensor_layout::convolution::GNHWK, ck::half_t, ck::half_t, ck::half_t>(
            true, 1, false, false, param);
        EXPECT_TRUE(pass);

        // bf16
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            2, ck::tensor_layout::convolution::GNHWC, ck::tensor_layout::convolution::GKYXC,
            ck::tensor_layout::convolution::GNHWK, ck::bhalf_t, ck::bhalf_t, ck::bhalf_t>(
            true, 1, false, false, param);
        EXPECT_TRUE(pass);

        // int8
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            2, ck::tensor_layout::convolution::GNHWC, ck::tensor_layout::convolution::GKYXC,
            ck::tensor_layout::convolution::GNHWK, int8_t, int8_t, int8_t>(
            true, 1, false, false, param);
        EXPECT_TRUE(pass);
    }
}

// 3d GNDHWC/GKZYXC/GNDHWK
TEST_F(TestGroupedConvNdFwd, GroupedConv3dFwdGNDHWC)
{
    conv_params.clear();
    conv_params.push_back(
        {3, 2, 128, 128, 256, {1, 1, 1}, {7, 7, 7}, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}});
    conv_params.push_back(
        {3, 2, 128, 128, 256, {3, 3, 3}, {14, 14, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}});
    conv_params.push_back(
        {3, 2, 128, 128, 256, {1, 1, 1}, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}});
    conv_params.push_back(
        {3, 1, 1, 1, 32, {3, 3, 3}, {32, 32, 32}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}});
    this->conv_params.push_back(
        {3, 1, 1, 64, 3, {3, 3, 3}, {32, 32, 32}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}});
    conv_params.push_back(
        {3, 1, 1, 1, 1, {3, 3, 3}, {32, 32, 32}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}});

    for(auto& param : conv_params)
    {
        bool pass;

        // fp32
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            3, ck::tensor_layout::convolution::GNDHWC, ck::tensor_layout::convolution::GKZYXC,
            ck::tensor_layout::convolution::GNDHWK, float, float, float>(
            true, 1, false, false, param); // do_verification, init_method, do_log, time_kernel
        EXPECT_TRUE(pass);

        // fp16
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            3, ck::tensor_layout::convolution::GNDHWC, ck::tensor_layout::convolution::GKZYXC,
            ck::tensor_layout::convolution::GNDHWK, ck::half_t, ck::half_t, ck::half_t>(
            true, 1, false, false, param);
        EXPECT_TRUE(pass);

        // bf16
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            3, ck::tensor_layout::convolution::GNDHWC, ck::tensor_layout::convolution::GKZYXC,
            ck::tensor_layout::convolution::GNDHWK, ck::bhalf_t, ck::bhalf_t, ck::bhalf_t>(
            true, 1, false, false, param);
        EXPECT_TRUE(pass);

        // int8
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            3, ck::tensor_layout::convolution::GNDHWC, ck::tensor_layout::convolution::GKZYXC,
            ck::tensor_layout::convolution::GNDHWK, int8_t, int8_t, int8_t>(
            true, 1, false, false, param);
        EXPECT_TRUE(pass);
    }
}

// 2d NHWGC/KYXGC/NHWGK
TEST_F(TestGroupedConvNdFwd, GroupedConv2dFwdNHWGC)
{
    conv_params.clear();
    conv_params.push_back({2, 2, 128, 128, 256, {1, 1}, {7, 7}, {2, 2}, {1, 1}, {0, 0}, {0, 0}});
    conv_params.push_back({2, 2, 128, 128, 256, {3, 3}, {14, 14}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    conv_params.push_back({2, 2, 128, 128, 256, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}});
    conv_params.push_back({2, 1, 1, 1, 32, {3, 3}, {32, 32}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    conv_params.push_back({2, 1, 1, 64, 3, {3, 3}, {32, 32}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    conv_params.push_back({2, 1, 1, 1, 1, {3, 3}, {32, 32}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});

    for(auto& param : conv_params)
    {
        bool pass;

        // fp16
        pass = ck::profiler::profile_grouped_conv_fwd_impl<
            2, ck::tensor_layout::convolution::NHWGC, ck::tensor_layout::convolution::GKYXC,
            ck::tensor_layout::convolution::NHWGK, ck::half_t, ck::half_t, ck::half_t>(
            true, 1, false, false, param); // do_verification, init_method, do_log, time_kernel
        EXPECT_TRUE(pass);
    }
}
```
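The 284 deleted lines above are the four hand-written `TEST_F` bodies, each repeating the `profile_grouped_conv_fwd_impl` call once per data type. They are superseded by the typed-test rewrite in the new file below, which covers the same layout and data-type combinations from a single fixture; the large parameter sets also shrink their third `ConvParam` field from 128 to 32 (apparently the batch dimension, presumably to cut test runtime).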
test/grouped_convnd_fwd/test_grouped_convnd_fwd.cpp · new file 0 → 100644 · view file @ 5c75ff5c

```cpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <cstdlib>
#include <iostream>
#include <initializer_list>
#include <vector>

#include <gtest/gtest.h>

#include "profiler/profile_grouped_conv_fwd_impl.hpp"

template <typename Tuple>
class TestGroupedConvndFwd : public ::testing::Test
{
    protected:
    using DataType  = std::tuple_element_t<0, Tuple>;
    using InLayout  = std::tuple_element_t<1, Tuple>;
    using WeiLayout = std::tuple_element_t<2, Tuple>;
    using OutLayout = std::tuple_element_t<3, Tuple>;

    std::vector<ck::utils::conv::ConvParam> conv_params;

    template <ck::index_t NDimSpatial>
    void Run()
    {
        EXPECT_FALSE(conv_params.empty());
        bool pass = true;
        for(auto& param : conv_params)
        {
            pass = pass && ck::profiler::profile_grouped_conv_fwd_impl<NDimSpatial,
                                                                       InLayout,
                                                                       WeiLayout,
                                                                       OutLayout,
                                                                       DataType,
                                                                       DataType,
                                                                       DataType>(
                               true,  // do_verification
                               1,     // init_method: integer value
                               false, // do_log
                               false, // time_kernel
                               param);
        }
        EXPECT_TRUE(pass);
    }
};

using namespace ck::tensor_layout::convolution;

using KernelTypes1d = ::testing::Types<std::tuple<float, GNWC, GKXC, GNWK>,
                                       std::tuple<ck::half_t, GNWC, GKXC, GNWK>,
                                       std::tuple<ck::bhalf_t, GNWC, GKXC, GNWK>,
                                       std::tuple<int8_t, GNWC, GKXC, GNWK>>;

using KernelTypes2d = ::testing::Types<std::tuple<float, GNHWC, GKYXC, GNHWK>,
                                       std::tuple<ck::half_t, GNHWC, GKYXC, GNHWK>,
                                       std::tuple<ck::bhalf_t, GNHWC, GKYXC, GNHWK>,
                                       std::tuple<int8_t, GNHWC, GKYXC, GNHWK>,
                                       std::tuple<float, NHWGC, GKYXC, NHWGK>,
                                       std::tuple<ck::half_t, NHWGC, GKYXC, NHWGK>,
                                       std::tuple<ck::bhalf_t, NHWGC, GKYXC, NHWGK>,
                                       std::tuple<int8_t, NHWGC, GKYXC, NHWGK>>;

using KernelTypes3d = ::testing::Types<std::tuple<float, GNDHWC, GKZYXC, GNDHWK>,
                                       std::tuple<ck::half_t, GNDHWC, GKZYXC, GNDHWK>,
                                       std::tuple<ck::bhalf_t, GNDHWC, GKZYXC, GNDHWK>,
                                       std::tuple<int8_t, GNDHWC, GKZYXC, GNDHWK>,
                                       std::tuple<float, NDHWGC, GKZYXC, NDHWGK>,
                                       std::tuple<ck::half_t, NDHWGC, GKZYXC, NDHWGK>,
                                       std::tuple<ck::bhalf_t, NDHWGC, GKZYXC, NDHWGK>,
                                       std::tuple<int8_t, NDHWGC, GKZYXC, NDHWGK>>;

template <typename Tuple>
class TestGroupedConvndFwd1d : public TestGroupedConvndFwd<Tuple>
{
};

template <typename Tuple>
class TestGroupedConvndFwd2d : public TestGroupedConvndFwd<Tuple>
{
};

template <typename Tuple>
class TestGroupedConvndFwd3d : public TestGroupedConvndFwd<Tuple>
{
};

TYPED_TEST_SUITE(TestGroupedConvndFwd1d, KernelTypes1d);
TYPED_TEST_SUITE(TestGroupedConvndFwd2d, KernelTypes2d);
TYPED_TEST_SUITE(TestGroupedConvndFwd3d, KernelTypes3d);

TYPED_TEST(TestGroupedConvndFwd1d, Test1D)
{
    this->conv_params.clear();
    this->conv_params.push_back({1, 2, 32, 128, 256, {1}, {14}, {2}, {1}, {0}, {0}});
    this->conv_params.push_back({1, 2, 32, 128, 256, {3}, {28}, {1}, {1}, {1}, {1}});
    this->conv_params.push_back({1, 2, 32, 128, 256, {1}, {3}, {1}, {1}, {0}, {0}});
    this->conv_params.push_back({1, 1, 1, 1, 32, {3}, {32}, {1}, {1}, {1}, {1}});
    this->conv_params.push_back({1, 1, 1, 64, 3, {3}, {32}, {1}, {1}, {1}, {1}});
    this->template Run<1>();
}

TYPED_TEST(TestGroupedConvndFwd2d, Test2D)
{
    this->conv_params.clear();
    this->conv_params.push_back(
        {2, 2, 32, 128, 256, {1, 1}, {7, 7}, {2, 2}, {1, 1}, {0, 0}, {0, 0}});
    this->conv_params.push_back(
        {2, 2, 32, 128, 256, {3, 3}, {14, 14}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    this->conv_params.push_back(
        {2, 2, 32, 128, 256, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}});
    this->conv_params.push_back({2, 1, 1, 1, 32, {3, 3}, {32, 32}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    this->conv_params.push_back({2, 1, 1, 64, 3, {3, 3}, {32, 32}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    this->conv_params.push_back({2, 1, 1, 1, 1, {3, 3}, {32, 32}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    this->template Run<2>();
}

TYPED_TEST(TestGroupedConvndFwd3d, Test3D)
{
    this->conv_params.clear();
    this->conv_params.push_back(
        {3, 2, 32, 128, 256, {1, 1, 1}, {7, 7, 7}, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}});
    this->conv_params.push_back(
        {3, 2, 32, 128, 256, {3, 3, 3}, {14, 14, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}});
    this->conv_params.push_back(
        {3, 2, 32, 128, 256, {1, 1, 1}, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}});
    this->conv_params.push_back(
        {3, 1, 1, 1, 32, {3, 3, 3}, {32, 32, 32}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}});
    this->conv_params.push_back(
        {3, 1, 1, 64, 3, {3, 3, 3}, {32, 32, 32}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}});
    this->conv_params.push_back(
        {3, 1, 1, 1, 1, {3, 3, 3}, {32, 32, 32}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}});
    this->template Run<3>();
}
```
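One syntactic detail in the new file worth noting: the test bodies call `this->template Run<1>()` rather than plain `Run<1>()`. `Run` is a member template inherited from a base class that depends on the test's template parameter, so both the explicit `this->` and the `template` keyword are required for the call to parse. A minimal standalone illustration (names here are placeholders, not CK code):

```cpp
#include <iostream>

template <typename T>
struct Fixture
{
    // Member template inherited by the derived class below.
    template <int NDimSpatial>
    void Run() { std::cout << "running " << NDimSpatial << "D case\n"; }
};

template <typename T>
struct TypedTest : Fixture<T>
{
    void Body()
    {
        // Plain "Run<2>();" would not even parse here: Run is a dependent
        // name, so the compiler needs "this->" to find it and "template"
        // to read "<" as a template argument list rather than less-than.
        this->template Run<2>();
    }
};

int main()
{
    TypedTest<float>{}.Body();
    return 0;
}
```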
test/grouped_gemm/CMakeLists.txt · view file @ 5c75ff5c

```diff
-if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
 list(APPEND gpu_list gfx908 gfx90a gfx940 gfx941 gfx942)
 set(target 0)
 foreach(gpu IN LISTS GPU_TARGETS)
 …
@@ -13,4 +12,3 @@ foreach(gpu IN LISTS GPU_TARGETS)
         set(target 1)
     endif()
 endforeach()
-endif()
```
test/normalization/CMakeLists.txt · view file @ 5c75ff5c

```diff
-if(DTYPES MATCHES "fp16" OR DTYPES MATCHES "fp32" OR NOT DEFINED DTYPES)
-    add_custom_target(test_normalization)
-endif()
-if(DTYPES MATCHES "fp32" OR NOT DEFINED DTYPES)
-    add_gtest_executable(test_layernorm2d_fp32 test_layernorm2d_fp32.cpp)
-    add_gtest_executable(test_groupnorm_fp32 test_groupnorm_fp32.cpp)
-    target_link_libraries(test_layernorm2d_fp32 PRIVATE utility device_normalization_instance)
-    target_link_libraries(test_groupnorm_fp32 PRIVATE utility device_normalization_instance)
-    add_dependencies(test_normalization test_layernorm2d_fp32)
-endif()
-if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
-    add_gtest_executable(test_layernorm2d_fp16 test_layernorm2d_fp16.cpp)
-    add_gtest_executable(test_groupnorm_fp16 test_groupnorm_fp16.cpp)
-    target_link_libraries(test_layernorm2d_fp16 PRIVATE utility device_normalization_instance)
-    target_link_libraries(test_groupnorm_fp16 PRIVATE utility device_normalization_instance)
-    add_dependencies(test_normalization test_layernorm2d_fp16)
-endif()
+add_custom_target(test_normalization)
+add_gtest_executable(test_layernorm2d_fp32 test_layernorm2d_fp32.cpp)
+if(result EQUAL 0)
+    target_link_libraries(test_layernorm2d_fp32 PRIVATE utility device_normalization_instance)
+    add_dependencies(test_normalization test_layernorm2d_fp32)
+endif()
+add_gtest_executable(test_groupnorm_fp32 test_groupnorm_fp32.cpp)
+if(result EQUAL 0)
+    target_link_libraries(test_groupnorm_fp32 PRIVATE utility device_normalization_instance)
+    add_dependencies(test_normalization test_groupnorm_fp32)
+endif()
+add_gtest_executable(test_layernorm2d_fp16 test_layernorm2d_fp16.cpp)
+if(result EQUAL 0)
+    target_link_libraries(test_layernorm2d_fp16 PRIVATE utility device_normalization_instance)
+    add_dependencies(test_normalization test_layernorm2d_fp16)
+endif()
+add_gtest_executable(test_groupnorm_fp16 test_groupnorm_fp16.cpp)
+if(result EQUAL 0)
+    target_link_libraries(test_groupnorm_fp16 PRIVATE utility device_normalization_instance)
+    add_dependencies(test_normalization test_groupnorm_fp16)
+endif()
```