"...models/git@developer.sourcefind.cn:OpenDAS/vision.git" did not exist on "26e34735bc935b0d54588dc32358171390ab593b"
Unverified Commit 5b178874 authored by Chao Liu, committed by GitHub

Fix Tests build (#109)

* fix tests

* remove useless file

* fix test build

* reduce parallelism when compiling

* fix test
parent 7a9b93f4
@@ -60,7 +60,8 @@ def cmake_build(Map conf=[:]){
         cd build
     """
     def setup_cmd = conf.get("setup_cmd", "${cmake_envs} cmake ${setup_args} .. ")
-    def build_cmd = conf.get("build_cmd", "${build_envs} dumb-init make -j\$(( \$(nproc) / 4 )) ${config_targets}")
+    // reduce parallelism when compiling, clang uses too much memory
+    def build_cmd = conf.get("build_cmd", "${build_envs} dumb-init make -j\$(( \$(nproc) / 5 )) ${config_targets}")
    def execute_cmd = conf.get("execute_cmd", "")
    def cmd = conf.get("cmd", """
...
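For a rough sense of what the new divisor does, here is a small shell sketch of the job-count arithmetic; the 64-thread build node is purely illustrative:

# purely illustrative: a build node with 64 hardware threads
cores=64
echo $(( cores / 4 ))   # old setting: make -j16
echo $(( cores / 5 ))   # new setting: make -j12, fewer concurrent clang jobs and lower peak memory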
@@ -21,34 +21,16 @@ function(add_test_executeable TEST_NAME)
     target_link_libraries(${TEST_NAME} PRIVATE host_tensor)
     target_link_libraries(${TEST_NAME} PRIVATE device_gemm_instance)
     target_link_libraries(${TEST_NAME} PRIVATE device_conv2d_fwd_instance)
+    target_link_libraries(${TEST_NAME} PRIVATE device_conv2d_bwd_data_instance)
     add_test(NAME ${TEST_NAME} COMMAND $<TARGET_FILE:${TEST_NAME}> )
     add_dependencies(tests ${TEST_NAME})
     add_dependencies(check ${TEST_NAME})
 endfunction(add_test_executeable TEST_NAME)

-file(GLOB TESTS */*.cpp)
+file(GLOB TESTS *.cpp)

 foreach(TEST ${TESTS})
     get_filename_component(BASE_NAME ${TEST} NAME_WE)
     message("adding test ${BASE_NAME}")
     add_test_executeable(test_${BASE_NAME} ${TEST})
 endforeach(TEST ${TESTS})
-
-# test_gemm_xdl_fp32
-set(GEMM_XDL_FP32_SOURCE gemm_xdl/test_gemm_fp32.cpp)
-add_executable(test_gemm_xdl_fp32 ${GEMM_XDL_FP32_SOURCE})
-target_link_libraries(test_gemm_xdl_fp32 PRIVATE host_tensor)
-target_link_libraries(test_gemm_xdl_fp32 PRIVATE device_gemm_instance)
-
-# test_gemm_xdl_bf16
-set(GEMM_XDL_BF16_SOURCE gemm_xdl/test_gemm_bf16.cpp)
-add_executable(test_gemm_xdl_bf16 ${GEMM_XDL_BF16_SOURCE})
-target_link_libraries(test_gemm_xdl_bf16 PRIVATE host_tensor)
-target_link_libraries(test_gemm_xdl_bf16 PRIVATE device_gemm_instance)
-
-# test_gemm_xdl_int8
-set(GEMM_XDL_INT8_SOURCE gemm_xdl/test_gemm_int8.cpp)
-add_executable(test_gemm_xdl_int8 ${GEMM_XDL_INT8_SOURCE})
-target_link_libraries(test_gemm_xdl_int8 PRIVATE host_tensor)
-target_link_libraries(test_gemm_xdl_int8 PRIVATE device_gemm_instance)
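As a quick usage sketch of how the generated targets get built and run, assuming the out-of-source build directory used by the pipeline above and the umbrella `tests` target that `add_dependencies(tests ...)` refers to; the `test_conv2d_bwd_data` name in the filter is only an assumption derived from the `test_${BASE_NAME}` naming rule:

# configure, then build every test registered through add_test_executeable
mkdir -p build && cd build
cmake ..
make -j$(( $(nproc) / 5 )) tests
# run all registered tests, or filter a single one by its generated name (assumed name)
ctest --output-on-failure
ctest -R test_conv2d_bwd_data --output-on-failure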
@@ -11,8 +11,9 @@
 using F16 = ck::half_t;
 using F32 = float;
-using BF16 = ushort;
+using BF16 = ck::bhalf_t;
 using INT8 = int8_t;

 namespace ck {
 namespace tensor_operation {
 namespace device {
@@ -22,6 +23,7 @@ using DeviceConvBwdDataNoOpPtr =
     DeviceConvBwdDataPtr<ck::tensor_operation::element_wise::PassThrough,
                          ck::tensor_operation::element_wise::PassThrough,
                          ck::tensor_operation::element_wise::PassThrough>;
 void add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_f32_instances(
     std::vector<DeviceConvBwdDataNoOpPtr>&);
 void add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_f16_instances(
@@ -30,6 +32,7 @@ void add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_bf16_instances(
     std::vector<DeviceConvBwdDataNoOpPtr>&);
 void add_device_conv2d_bwd_data_xdl_nhwc_kyxc_nhwk_int8_instances(
     std::vector<DeviceConvBwdDataNoOpPtr>&);
 } // namespace device_conv2d_bwd_data_instance
 } // namespace device
 } // namespace tensor_operation
@@ -78,7 +81,12 @@ int main(int argc, char* argv[])
     ck::index_t in_right_pad_h = 1;
     ck::index_t in_right_pad_w = 1;

-    if(argc == 3)
+    if(argc == 1)
+    {
+        data_type   = 1;
+        init_method = 1;
+    }
+    else if(argc == 3)
     {
         data_type   = std::stoi(argv[1]);
         init_method = std::stoi(argv[2]);
@@ -106,11 +114,9 @@ int main(int argc, char* argv[])
     }
     else
     {
-        printf("arg1: data type (0=fp32 )\n");
-        printf("arg2: verification (0=no, 1=yes)\n");
-        printf("arg3: initialization (0=no init, 1=integer value, 2=decimal value)\n");
-        printf("arg4: run kernel # of times (>1)\n");
-        printf("arg5 to 19: N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, "
+        printf("arg1: data type (0=fp32, 1=fp16, 2= bfp16, 3= int8_t )\n");
+        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
+        printf("arg3 to 17: N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, "
                "RightPx\n");
         exit(1);
     }
@@ -296,7 +302,7 @@ int main(int argc, char* argv[])
     if(data_type == 0)
     {
-        Run(float(), float(), F32());
+        Run(F32(), F32(), F32());
     }
     else if(data_type == 1)
     {
...
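To make the new argument handling concrete, a short usage sketch; the binary name is an assumption based on the test_${BASE_NAME} naming rule in the CMake change above, and the sizes on the last line are arbitrary illustrative values, but the argument meanings follow the updated usage message:

# no arguments: defaults to data_type=1 (fp16) and init_method=1 (integer initialization)
./test_conv2d_bwd_data
# two arguments: data type and initialization method, e.g. fp32 with integer initialization
./test_conv2d_bwd_data 0 1
# full form adds the 15 convolution sizes as args 3 to 17:
# N K C Y X Hi Wi Sy Sx Dy Dx LeftPy LeftPx RightPy RightPx
./test_conv2d_bwd_data 0 1 128 256 192 3 3 71 71 2 2 1 1 1 1 1 1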
@@ -77,8 +77,8 @@ int main(int argc, char* argv[])
     ck::index_t in_right_pad_w = 1;
     if(argc == 1)
     {
+        data_type   = 1;
         init_method = 1;
-        data_type   = 0;
     }
     else if(argc == 3)
     {
@@ -108,10 +108,9 @@ int main(int argc, char* argv[])
     }
     else
     {
-        printf("arg1: verification (0=no, 1=yes)\n");
+        printf("arg1: data type (0=fp32, 1=fp16, 2= bfp16, 3= int8_t )\n");
         printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
-        printf("arg3: run kernel # of times (>1)\n");
-        printf("arg4 to 18: N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, "
+        printf("arg3 to 17: N, K, C, Y, X, Hi, Wi, Sy, Sx, Dy, Dx, LeftPy, LeftPx, RightPy, "
                "RightPx\n");
         exit(1);
     }
...