Unverified Commit 992f71e3 authored by JD's avatar JD Committed by GitHub
Browse files

Update test CMakeLists to add new tests automatically and add Jenkins stage for tests (#88)



* add docker file and make default target buildable

* add Jenkinsfile

* remove empty env block

* fix package stage

* remove render group from docker run

* clean up Jenkins file

* add cppcheck as dev dependency

* update cmake file

* Add profiler build stage

* add hip_version config file for reduction operator

* correct jenkins var name

* Build release instead of debug

* Update test CMakeLists.txt
reorg test dir
add test stage

* reduce compile threads to prevent compiler crash

* add optional debug stage, update second test

* remove old test target

* fix tests to return proper results and self review

* Fix package name and make test run without args

* change Dockerfile to ues rocm4.3.1

* remove parallelism from build

* Lower paralellism
Co-authored-by: default avatarChao Liu <chao.liu2@amd.com>
parent 6d4450ef
...@@ -240,9 +240,8 @@ file(GLOB_RECURSE DEVICE_OPS_SOURCE "device_operation/*.cpp") ...@@ -240,9 +240,8 @@ file(GLOB_RECURSE DEVICE_OPS_SOURCE "device_operation/*.cpp")
set(CK_HEADERS ${COMPOSABLE_KERNEL_HEADERS} ${DEVICE_OPS_HEADERS}) set(CK_HEADERS ${COMPOSABLE_KERNEL_HEADERS} ${DEVICE_OPS_HEADERS})
set(CK_SOURCE ${DEVICE_OPS_SOURCE}) set(CK_SOURCE ${DEVICE_OPS_SOURCE})
add_library(composable_kernel add_library(composable_kernel ${CK_SOURCE})
${CK_SOURCE}
)
target_include_directories(composable_kernel PUBLIC target_include_directories(composable_kernel PUBLIC
$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/composable_kernel/include> $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/composable_kernel/include>
......
FROM ubuntu:18.04 FROM ubuntu:18.04
ARG ROCMVERSION=4.5 ARG ROCMVERSION=4.3.1
ARG OSDB_BKC_VERSION ARG OSDB_BKC_VERSION
RUN set -xe RUN set -xe
......
...@@ -17,7 +17,7 @@ def cmake_build(Map conf=[:]){ ...@@ -17,7 +17,7 @@ def cmake_build(Map conf=[:]){
def compiler = conf.get("compiler","/opt/rocm/bin/hipcc") def compiler = conf.get("compiler","/opt/rocm/bin/hipcc")
def config_targets = conf.get("config_targets","check") def config_targets = conf.get("config_targets","check")
def debug_flags = "-g -fno-omit-frame-pointer -fsanitize=undefined -fno-sanitize-recover=undefined " + conf.get("extradebugflags", "") def debug_flags = "-g -fno-omit-frame-pointer -fsanitize=undefined -fno-sanitize-recover=undefined " + conf.get("extradebugflags", "")
def build_envs = "CTEST_PARALLEL_LEVEL=4 MIOPEN_CONV_PRECISE_ROCBLAS_TIMING=0 " + conf.get("build_env","") def build_envs = "CTEST_PARALLEL_LEVEL=4 " + conf.get("build_env","")
def prefixpath = conf.get("prefixpath","/opt/rocm") def prefixpath = conf.get("prefixpath","/opt/rocm")
def setup_args = conf.get("setup_args","") def setup_args = conf.get("setup_args","")
...@@ -60,7 +60,7 @@ def cmake_build(Map conf=[:]){ ...@@ -60,7 +60,7 @@ def cmake_build(Map conf=[:]){
cd build cd build
""" """
def setup_cmd = conf.get("setup_cmd", "${cmake_envs} cmake ${setup_args} .. ") def setup_cmd = conf.get("setup_cmd", "${cmake_envs} cmake ${setup_args} .. ")
def build_cmd = conf.get("build_cmd", "${build_envs} dumb-init make -j\$(nproc) ${config_targets}") def build_cmd = conf.get("build_cmd", "${build_envs} dumb-init make -j\$(( \$(nproc) / 4 )) ${config_targets}")
def execute_cmd = conf.get("execute_cmd", "") def execute_cmd = conf.get("execute_cmd", "")
def cmd = conf.get("cmd", """ def cmd = conf.get("cmd", """
...@@ -177,15 +177,27 @@ pipeline { ...@@ -177,15 +177,27 @@ pipeline {
// buildHipClangJobAndReboot(build_cmd: build_cmd, no_reboot:true, prefixpath: '/opt/rocm', build_type: 'debug') // buildHipClangJobAndReboot(build_cmd: build_cmd, no_reboot:true, prefixpath: '/opt/rocm', build_type: 'debug')
// } // }
// } // }
stage('Build Profiler: gfx908') stage('Build Profiler: Release, gfx908')
{ {
agent { label rocmnode("gfx908")} agent { label rocmnode("nogpu")}
environment{ environment{
setup_args = """ -D CMAKE_CXX_FLAGS="-DCK_AMD_GPU_GFX908 --amdgpu-target=gfx908 -O3 " -DBUILD_DEV=On """ setup_args = """ -D CMAKE_CXX_FLAGS="-DCK_AMD_GPU_GFX908 --amdgpu-target=gfx908 -O3 " -DBUILD_DEV=On """
build_cmd = "make -j\$(nproc) -k ckProfiler"
} }
steps{ steps{
buildHipClangJobAndReboot(setup_args:setup_args, build_cmd:build_cmd, no_reboot:true, build_type: 'Release') buildHipClangJobAndReboot(setup_args:setup_args, config_targets: "ckProfiler", no_reboot:true, build_type: 'Release')
}
}
stage('Build Profiler: Debug, gfx908')
{
agent { label rocmnode("nogpu")}
environment{
setup_args = """ -D CMAKE_CXX_FLAGS="-DCK_AMD_GPU_GFX908 --amdgpu-target=gfx908 -O3 " -DBUILD_DEV=On """
}
steps{
// until we stabilize debug build due to compiler crashes
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
buildHipClangJobAndReboot(setup_args:setup_args, config_targets: "ckProfiler", no_reboot:true, build_type: 'Debug')
}
} }
} }
stage('Clang Format') { stage('Clang Format') {
...@@ -207,6 +219,24 @@ pipeline { ...@@ -207,6 +219,24 @@ pipeline {
} }
} }
} }
stage("Tests")
{
parallel
{
stage("Run Tests: gfx908")
{
agent{ label rocmnode("gfx908")}
environment{
setup_args = """ -D CMAKE_CXX_FLAGS="-DCK_AMD_GPU_GFX908 --amdgpu-target=gfx908 -O3 " -DBUILD_DEV=On """
}
steps{
buildHipClangJobAndReboot(setup_args:setup_args, config_targets: "check", no_reboot:true, build_type: 'Release')
}
}
}
}
// enable after the cmake file supports packaging // enable after the cmake file supports packaging
// stage("Packages") { // stage("Packages") {
// when { // when {
...@@ -222,4 +252,4 @@ pipeline { ...@@ -222,4 +252,4 @@ pipeline {
// } // }
// } // }
} }
} }
\ No newline at end of file
add_subdirectory(host_tensor) add_subdirectory(host_tensor)
\ No newline at end of file
...@@ -5,4 +5,4 @@ ignore = pcre ...@@ -5,4 +5,4 @@ ignore = pcre
deps = deps =
-f dev-requirements.txt -f dev-requirements.txt
define = define =
BUILD_DEV=On BUILD_DEV=On
\ No newline at end of file
...@@ -13,40 +13,24 @@ include_directories(BEFORE ...@@ -13,40 +13,24 @@ include_directories(BEFORE
${PROJECT_SOURCE_DIR}/test/include ${PROJECT_SOURCE_DIR}/test/include
) )
# test_magic_number_division add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -C ${CMAKE_CFG_INTDIR})
set(MAGIC_NUMBER_DIVISISON_SOURCE magic_number_division/main.cpp) add_custom_target(tests)
add_executable(test_magic_number_division ${MAGIC_NUMBER_DIVISISON_SOURCE})
target_link_libraries(test_magic_number_division PRIVATE host_tensor) function(add_test_executeable TEST_NAME)
add_executable(${TEST_NAME} ${ARGN})
target_link_libraries(${TEST_NAME} PRIVATE host_tensor)
set(CONV2D_FWD_SOURCE conv2d_fwd/main.cpp) target_link_libraries(${TEST_NAME} PRIVATE device_gemm_instance)
target_link_libraries(${TEST_NAME} PRIVATE device_conv2d_fwd_instance)
add_executable(test_conv2d_fwd ${CONV2D_FWD_SOURCE}) add_test(NAME ${TEST_NAME} COMMAND $<TARGET_FILE:${TEST_NAME}> )
target_link_libraries(test_conv2d_fwd PRIVATE host_tensor) add_dependencies(tests ${TEST_NAME})
target_link_libraries(test_conv2d_fwd PRIVATE device_conv2d_fwd_instance) add_dependencies(check ${TEST_NAME})
endfunction(add_test_executeable TEST_NAME)
# test_split_k
set(SPLIT_K_SOURCE split_k/main.cpp)
add_executable(test_split_k ${SPLIT_K_SOURCE}) file(GLOB TESTS *.cpp)
target_link_libraries(test_split_k PRIVATE host_tensor)
target_link_libraries(test_split_k PRIVATE device_gemm_instance) foreach(TEST ${TESTS})
get_filename_component(BASE_NAME ${TEST} NAME_WE)
# test_conv_util message("adding test ${BASE_NAME}")
set(CONV_UTIL_SOURCE conv_util/main.cpp) add_test_executeable(test_${BASE_NAME} ${TEST})
add_executable(test_conv_util ${CONV_UTIL_SOURCE}) endforeach(TEST ${TESTS})
target_link_libraries(test_conv_util PRIVATE host_tensor)
# test_reference_conv_fwd
set(REFERENCE_CONV_FWD_SOURCE reference_conv_fwd/main.cpp)
add_executable(test_reference_conv_fwd ${REFERENCE_CONV_FWD_SOURCE})
target_link_libraries(test_reference_conv_fwd PRIVATE host_tensor)
# test_convnd_fwd_xdl
set(CONVND_FWD_XDL_SOURCE convnd_fwd_xdl/main.cpp)
add_executable(test_convnd_fwd_xdl ${CONVND_FWD_XDL_SOURCE})
target_link_libraries(test_convnd_fwd_xdl PRIVATE host_tensor)
# test space_filling_curve_
set(SPACE_FILLING_CURVE_SOURCE space_filling_curve/space_filling_curve.cpp)
add_executable(space_filling_curve ${SPACE_FILLING_CURVE_SOURCE})
target_link_libraries(space_filling_curve PRIVATE host_tensor)
...@@ -75,8 +75,12 @@ int main(int argc, char* argv[]) ...@@ -75,8 +75,12 @@ int main(int argc, char* argv[])
ck::index_t in_left_pad_w = 1; ck::index_t in_left_pad_w = 1;
ck::index_t in_right_pad_h = 1; ck::index_t in_right_pad_h = 1;
ck::index_t in_right_pad_w = 1; ck::index_t in_right_pad_w = 1;
if(argc == 1)
if(argc == 3) {
init_method = 1;
data_type = 0;
}
else if(argc == 3)
{ {
data_type = std::stoi(argv[1]); data_type = std::stoi(argv[1]);
init_method = std::stoi(argv[2]); init_method = std::stoi(argv[2]);
...@@ -275,33 +279,31 @@ int main(int argc, char* argv[]) ...@@ -275,33 +279,31 @@ int main(int argc, char* argv[])
if(success) if(success)
{ {
std::cout << "test conv2d fwd : Pass" << std::endl; std::cout << "test conv2d fwd : Pass" << std::endl;
return 0;
} }
else else
{ {
std::cout << "test conv2d fwd: Fail " << std::endl; std::cout << "test conv2d fwd: Fail " << std::endl;
return -1;
} }
}; };
int res = -1;
if(data_type == 0) if(data_type == 0)
{ {
Run(float(), float(), float()); res = Run(float(), float(), float());
} }
else if(data_type == 1) else if(data_type == 1)
{ {
Run(ck::half_t(), ck::half_t(), ck::half_t()); res = Run(ck::half_t(), ck::half_t(), ck::half_t());
} }
else if(data_type == 2) else if(data_type == 2)
{ {
Run(ushort(), ushort(), ushort()); res = Run(ushort(), ushort(), ushort());
} }
else if(data_type == 3) else if(data_type == 3)
{ {
Run(int8_t(), int8_t(), int8_t()); res = Run(int8_t(), int8_t(), int8_t());
}
else
{
return 1;
} }
return 0; return res;
} }
...@@ -161,11 +161,12 @@ int main(int, char*[]) ...@@ -161,11 +161,12 @@ int main(int, char*[])
if(pass) if(pass)
{ {
std::cout << "test magic number division: Pass" << std::endl; std::cout << "test magic number division: Pass" << std::endl;
return 0;
} }
else else
{ {
std::cout << "test magic number division: Fail" << std::endl; std::cout << "test magic number division: Fail" << std::endl;
return -1;
} }
return 1;
} }
...@@ -57,32 +57,24 @@ static bool check_out(const Tensor<T>& ref, const Tensor<T>& result) ...@@ -57,32 +57,24 @@ static bool check_out(const Tensor<T>& ref, const Tensor<T>& result)
return true; return true;
} }
int main(int argc, char* argv[]) struct gemmArgs
{ {
if(argc != 9) int layout;
{ int M;
printf("arg1: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n"); int N;
printf(" 1: A[m, k] * B[n, k] = C[m, n];\n"); int K;
printf(" 2: A[k, m] * B[k, n] = C[m, n];\n"); int StrideA;
printf(" 3: A[k, m] * B[n, k] = C[m, n])\n"); int StrideB;
printf("arg2 to 7: M, N, K, StrideA, StrideB, StrideC KBatch\n"); int StrideC;
return 1; int KBatch;
} };
const int layout = static_cast<GemmMatrixLayout>(std::stoi(argv[1]));
const int M = std::stoi(argv[2]);
const int N = std::stoi(argv[3]);
const int K = std::stoi(argv[4]);
const int StrideA = std::stoi(argv[5]);
const int StrideB = std::stoi(argv[6]);
const int StrideC = std::stoi(argv[7]);
const int KBatch = std::stoi(argv[8]);
int test_gemm(const gemmArgs& args)
{
bool a_row_major, b_row_major, c_row_major; bool a_row_major, b_row_major, c_row_major;
switch(layout) switch(args.layout)
{ {
case GemmMatrixLayout::MK_KN_MN: case GemmMatrixLayout::MK_KN_MN:
a_row_major = true; a_row_major = true;
...@@ -121,10 +113,10 @@ int main(int argc, char* argv[]) ...@@ -121,10 +113,10 @@ int main(int argc, char* argv[])
} }
}; };
Tensor<float> a_m_k(f_host_tensor_descriptor(M, K, StrideA, a_row_major)); Tensor<float> a_m_k(f_host_tensor_descriptor(args.M, args.K, args.StrideA, a_row_major));
Tensor<float> b_k_n(f_host_tensor_descriptor(K, N, StrideB, b_row_major)); Tensor<float> b_k_n(f_host_tensor_descriptor(args.K, args.N, args.StrideB, b_row_major));
Tensor<float> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, c_row_major)); Tensor<float> c_m_n_host_result(f_host_tensor_descriptor(args.M, args.N, args.StrideC, c_row_major));
Tensor<float> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, c_row_major)); Tensor<float> c_m_n_device_result(f_host_tensor_descriptor(args.M, args.N, args.StrideC, c_row_major));
// init data // init data
std::size_t num_thread = std::thread::hardware_concurrency(); std::size_t num_thread = std::thread::hardware_concurrency();
...@@ -151,17 +143,17 @@ int main(int argc, char* argv[]) ...@@ -151,17 +143,17 @@ int main(int argc, char* argv[])
// add device GEMM instances // add device GEMM instances
std::vector<DeviceGemmNoOpPtr> gemm_ptrs; std::vector<DeviceGemmNoOpPtr> gemm_ptrs;
if(layout == GemmMatrixLayout::MK_KN_MN) if(args.layout == GemmMatrixLayout::MK_KN_MN)
{ {
ck::tensor_operation::device::device_gemm_instance:: ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f32_f32_f32_mk_kn_mn_instances(gemm_ptrs); add_device_gemm_xdl_splitk_f32_f32_f32_mk_kn_mn_instances(gemm_ptrs);
} }
else if(layout == GemmMatrixLayout::MK_NK_MN) else if(args.layout == GemmMatrixLayout::MK_NK_MN)
{ {
ck::tensor_operation::device::device_gemm_instance:: ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f32_f32_f32_mk_nk_mn_instances(gemm_ptrs); add_device_gemm_xdl_splitk_f32_f32_f32_mk_nk_mn_instances(gemm_ptrs);
} }
else if(layout == GemmMatrixLayout::KM_KN_MN) else if(args.layout == GemmMatrixLayout::KM_KN_MN)
{ {
ck::tensor_operation::device::device_gemm_instance:: ck::tensor_operation::device::device_gemm_instance::
add_device_gemm_xdl_splitk_f32_f32_f32_km_kn_mn_instances(gemm_ptrs); add_device_gemm_xdl_splitk_f32_f32_f32_km_kn_mn_instances(gemm_ptrs);
...@@ -179,16 +171,16 @@ int main(int argc, char* argv[]) ...@@ -179,16 +171,16 @@ int main(int argc, char* argv[])
gemm_ptr->MakeArgumentPointer(static_cast<float*>(a_device_buf.GetDeviceBuffer()), gemm_ptr->MakeArgumentPointer(static_cast<float*>(a_device_buf.GetDeviceBuffer()),
static_cast<float*>(b_device_buf.GetDeviceBuffer()), static_cast<float*>(b_device_buf.GetDeviceBuffer()),
static_cast<float*>(c_device_buf.GetDeviceBuffer()), static_cast<float*>(c_device_buf.GetDeviceBuffer()),
M, args.M,
N, args.N,
K, args.K,
StrideA, args.StrideA,
StrideB, args.StrideB,
StrideC, args.StrideC,
ck::tensor_operation::element_wise::PassThrough{}, ck::tensor_operation::element_wise::PassThrough{},
ck::tensor_operation::element_wise::PassThrough{}, ck::tensor_operation::element_wise::PassThrough{},
ck::tensor_operation::element_wise::PassThrough{}, ck::tensor_operation::element_wise::PassThrough{},
KBatch); args.KBatch);
auto invoker_ptr = gemm_ptr->MakeInvokerPointer(); auto invoker_ptr = gemm_ptr->MakeInvokerPointer();
...@@ -205,7 +197,7 @@ int main(int argc, char* argv[]) ...@@ -205,7 +197,7 @@ int main(int argc, char* argv[])
success = true; success = true;
} }
} }
auto error_code = 0;
if(success) if(success)
{ {
std::cout << "test split k : Pass" << std::endl; std::cout << "test split k : Pass" << std::endl;
...@@ -213,6 +205,49 @@ int main(int argc, char* argv[]) ...@@ -213,6 +205,49 @@ int main(int argc, char* argv[])
else else
{ {
std::cout << "test split k: Fail " << std::endl; std::cout << "test split k: Fail " << std::endl;
error_code = -1; // test needs to report failure
}
return error_code;
}
int main(int argc, char* argv[])
{
std::vector<gemmArgs> test_cases;
if(argc == 1)
{
test_cases = {{0, 3, 3, 3, 3, 3, 3, 1}};
// JD: Populate with more and meaningful
return 0;
}
else if(argc == 9)
{
const int layout = static_cast<GemmMatrixLayout>(std::stoi(argv[1]));
const int M = std::stoi(argv[2]);
const int N = std::stoi(argv[3]);
const int K = std::stoi(argv[4]);
const int StrideA = std::stoi(argv[5]);
const int StrideB = std::stoi(argv[6]);
const int StrideC = std::stoi(argv[7]);
const int KBatch = std::stoi(argv[8]);
test_cases = {{layout, M, N, K, StrideA, StrideB, StrideC, KBatch}};
}
else
{
printf("arg1: matrix layout (0: A[m, k] * B[k, n] = C[m, n];\n");
printf(" 1: A[m, k] * B[n, k] = C[m, n];\n");
printf(" 2: A[k, m] * B[k, n] = C[m, n];\n");
printf(" 3: A[k, m] * B[n, k] = C[m, n])\n");
printf("arg2 to 7: M, N, K, StrideA, StrideB, StrideC KBatch\n");
return -1;
}
for(const auto& kinder: test_cases)
{
const auto res = test_gemm(kinder);
if(!res)
return -1;
} }
return 0; return 0;
} }
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment