gaoqiong / composable_kernel_ROCM

Commit 63b152d6, authored Oct 17, 2024 by danyao12
Merge branch 'develop' into ck_tile/fa_bwd_v3
Parents: ae2d7d2b, 14c3cfb1

Changes: 132
Showing 12 changed files with 1319 additions and 49 deletions
library/include/ck/library/reference_tensor_operation/gpu/reference_gemm.hpp   +245  -0
library/src/tensor_operation_instance/gpu/CMakeLists.txt                       +13   -25
library/src/tensor_operation_instance/gpu/mha/CMakeLists.txt                   +16   -6
profiler/src/CMakeLists.txt                                                    +6    -6
script/cmake-ck-dev.sh                                                         +4    -0
script/cmake-ck-release.sh                                                     +4    -0
test/CMakeLists.txt                                                            +5    -12
test/ck_tile/CMakeLists.txt                                                    +1    -0
test/ck_tile/image_to_column/CMakeLists.txt                                    +4    -0
test/ck_tile/image_to_column/test_tile_image_to_column.cpp                     +142  -0
test/data_type/CMakeLists.txt                                                  +5    -0
test/data_type/test_custom_type.cpp                                            +874  -0
library/include/ck/library/reference_tensor_operation/gpu/reference_gemm.hpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iostream>
#include <sstream>

#include "ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/device/device_base.hpp"

namespace ck {

template <typename ALayout,
          typename BLayout,
          typename CLayout,
          typename ADataType,
          typename BDataType,
          typename CDataType,
          typename AccDataType,
          typename AElementwiseOperation,
          typename BElementwiseOperation,
          typename CDEElementwiseOperation,
          typename ComputeTypeA,
          typename ComputeTypeB>
__global__ void
#if CK_USE_LAUNCH_BOUNDS
__launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
#endif
    naive_gemm_kernel(const ADataType* __restrict__ p_a_grid,
                      const BDataType* __restrict__ p_b_grid,
                      CDataType* __restrict__ p_c_grid,
                      index_t m,
                      index_t n,
                      index_t k,
                      const AElementwiseOperation a_element_op,
                      const BElementwiseOperation b_element_op,
                      const CDEElementwiseOperation c_element_op)
{
    using RowMajor = ck::tensor_layout::gemm::RowMajor;

    const int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int col_idx = blockIdx.y * blockDim.y + threadIdx.y;

    if(row_idx < m && col_idx < n)
    {
        AccDataType v_acc = static_cast<AccDataType>(0.0);
        ComputeTypeA v_a  = static_cast<ComputeTypeA>(0.0);
        ComputeTypeB v_b  = static_cast<ComputeTypeB>(0.0);
        CDataType v_c     = static_cast<CDataType>(0.0);

        for(int k_idx = 0; k_idx < k; ++k_idx)
        {
            // check input matrices layout
            int element_idx_a = 0;
            int element_idx_b = 0;
            if constexpr(std::is_same_v<ALayout, RowMajor>)
            {
                element_idx_a = row_idx * k + k_idx;
            }
            else
            {
                element_idx_a = row_idx + m * k_idx;
            }

            if constexpr(std::is_same_v<BLayout, RowMajor>)
            {
                element_idx_b = k_idx * n + col_idx;
            }
            else
            {
                element_idx_b = k_idx + k * col_idx;
            }

            // apply a_element_op
            a_element_op(v_a, p_a_grid[element_idx_a]);
            // apply b_element_op
            b_element_op(v_b, p_b_grid[element_idx_b]);
            // multiply and accumulate
            v_acc += static_cast<AccDataType>(v_a) * static_cast<AccDataType>(v_b);
        }

        // apply c_element_op
        c_element_op(v_c, v_acc);

        // check output matrix layout
        int element_idx_c = 0;
        if constexpr(std::is_same_v<CLayout, RowMajor>)
        {
            element_idx_c = row_idx * n + col_idx;
        }
        else
        {
            element_idx_c = row_idx + m * col_idx;
        }

        // prepare output
        p_c_grid[element_idx_c] = v_c;
    }
}
} // namespace ck

namespace ck {
namespace tensor_operation {
namespace device {

template <typename ALayout,
          typename BLayout,
          typename CLayout,
          typename ADataType,
          typename BDataType,
          typename CDataType,
          typename AccDataType,
          typename AElementwiseOperation,
          typename BElementwiseOperation,
          typename CElementwiseOperation,
          typename ComputeTypeA = CDataType,
          typename ComputeTypeB = ComputeTypeA>
struct ReferenceGemm : public device::BaseOperator
{
    // Argument
    struct Argument : public device::BaseArgument
    {
        Argument(const void* p_a_grid,
                 const void* p_b_grid,
                 void* p_c_grid,
                 index_t m,
                 index_t n,
                 index_t k,
                 AElementwiseOperation a_element_op,
                 BElementwiseOperation b_element_op,
                 CElementwiseOperation c_element_op)
            : p_a_grid_{static_cast<const ADataType*>(p_a_grid)},
              p_b_grid_{static_cast<const BDataType*>(p_b_grid)},
              p_c_grid_{static_cast<CDataType*>(p_c_grid)},
              m_{m},
              n_{n},
              k_{k},
              a_element_op_{a_element_op},
              b_element_op_{b_element_op},
              c_element_op_{c_element_op}
        {
        }

        const ADataType* p_a_grid_;
        const BDataType* p_b_grid_;
        CDataType* p_c_grid_;
        index_t m_;
        index_t n_;
        index_t k_;
        AElementwiseOperation a_element_op_;
        BElementwiseOperation b_element_op_;
        CElementwiseOperation c_element_op_;
    };

    // Invoker
    struct Invoker : public device::BaseInvoker
    {
        using Argument = ReferenceGemm::Argument;

        float Run(const Argument& arg, const StreamConfig& stream_config = StreamConfig{})
        {
            int block_size = 16;
            dim3 block_dim(block_size, block_size, 1);
            dim3 grid_dim(
                (arg.m_ + block_size - 1) / block_size, (arg.n_ + block_size - 1) / block_size, 1);

            auto launch_kernel = [&]() {
                const auto kernel = naive_gemm_kernel<ALayout,
                                                      BLayout,
                                                      CLayout,
                                                      ADataType,
                                                      BDataType,
                                                      CDataType,
                                                      AccDataType,
                                                      AElementwiseOperation,
                                                      BElementwiseOperation,
                                                      CElementwiseOperation,
                                                      ComputeTypeA,
                                                      ComputeTypeB>;

                return launch_and_time_kernel(stream_config,
                                              kernel,
                                              grid_dim,
                                              block_dim,
                                              0,
                                              arg.p_a_grid_,
                                              arg.p_b_grid_,
                                              arg.p_c_grid_,
                                              arg.m_,
                                              arg.n_,
                                              arg.k_,
                                              arg.a_element_op_,
                                              arg.b_element_op_,
                                              arg.c_element_op_);
            };

            return launch_kernel();
        }

        float Run(const device::BaseArgument* p_arg,
                  const StreamConfig& stream_config = StreamConfig{}) override
        {
            return Run(*dynamic_cast<const Argument*>(p_arg), stream_config);
        }
    };

    bool IsSupportedArgument(const device::BaseArgument*) override { return true; }

    static auto MakeArgument(const void* p_a_grid,
                             const void* p_b_grid,
                             void* p_c_grid,
                             index_t m,
                             index_t n,
                             index_t k,
                             AElementwiseOperation a_element_op,
                             BElementwiseOperation b_element_op,
                             CElementwiseOperation c_element_op)
    {
        return Argument{
            p_a_grid, p_b_grid, p_c_grid, m, n, k, a_element_op, b_element_op, c_element_op};
    }

    static auto MakeInvoker() { return Invoker{}; }

    virtual std::unique_ptr<device::BaseInvoker> MakeInvokerPointer()
    {
        return std::make_unique<Invoker>(Invoker{});
    }

    std::string GetTypeString() const override
    {
        auto str = std::stringstream();
        // clang-format off
        str << "Device Reference Gemm" << std::endl;
        // clang-format on
        return str.str();
    }
};
} // namespace device
} // namespace tensor_operation
} // namespace ck
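A minimal host-side usage sketch of the new ReferenceGemm operator (not part of the commit). Only MakeArgument, MakeInvoker and Invoker::Run come from the header above; the RowMajor/ColumnMajor layout tags, the PassThrough element-wise operations, and the extra include paths are assumptions about how a caller would typically instantiate it.

// Hypothetical caller of ReferenceGemm; the device pointers are assumed to be valid HIP allocations.
#include "ck/library/reference_tensor_operation/gpu/reference_gemm.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

using Row         = ck::tensor_layout::gemm::RowMajor;
using Col         = ck::tensor_layout::gemm::ColumnMajor;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;

float run_reference_gemm(const float* p_a_dev, // M x K, row-major
                         const float* p_b_dev, // K x N, column-major
                         float* p_c_dev,       // M x N, row-major
                         ck::index_t m,
                         ck::index_t n,
                         ck::index_t k)
{
    using RefGemm = ck::tensor_operation::device::
        ReferenceGemm<Row, Col, Row, float, float, float, float, PassThrough, PassThrough, PassThrough>;

    auto argument = RefGemm::MakeArgument(
        p_a_dev, p_b_dev, p_c_dev, m, n, k, PassThrough{}, PassThrough{}, PassThrough{});
    auto invoker = RefGemm::MakeInvoker();

    // Run() launches naive_gemm_kernel with one thread per C(m, n) element on 16x16 thread
    // blocks and returns the time measured by launch_and_time_kernel.
    return invoker.Run(argument, StreamConfig{});
}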
library/src/tensor_operation_instance/gpu/CMakeLists.txt

@@ -37,11 +37,7 @@ function(add_instance_library INSTANCE_NAME)
         endforeach()
     endif()
-    if(INSTANCES_ONLY)
-        set(INST_TARGETS ${DEFAULT_GPU_TARGETS})
-    else()
-        set(INST_TARGETS ${GPU_TARGETS})
-    endif()
+    set(INST_TARGETS ${SUPPORTED_GPU_TARGETS})
     # Do not build DL instances if DL_KERNELS macro is not set
     foreach(source IN LISTS ARGN)
@@ -64,9 +60,9 @@ function(add_instance_library INSTANCE_NAME)
             list(REMOVE_ITEM ARGN "${source}")
         endif()
     endforeach()
-    # Do not build mha instances if gfx94 targets are not on the target list
+    # Do not build mha instances if gfx94 or gfx90a targets are not on the target list
     foreach(source IN LISTS ARGN)
-        if(NOT INST_TARGETS MATCHES "gfx94" AND source MATCHES "mha")
+        if(NOT INST_TARGETS MATCHES "gfx94" AND NOT INST_TARGETS MATCHES "gfx90a" AND source MATCHES "mha")
            message("removing mha instance ${source}")
            list(REMOVE_ITEM ARGN "${source}")
        endif()
@@ -75,17 +71,13 @@ function(add_instance_library INSTANCE_NAME)
     if(ARGN)
         set(INST_OBJ)
         foreach(source IN LISTS ARGN)
-            if(INSTANCES_ONLY)
-                set(INST_TARGETS ${DEFAULT_GPU_TARGETS})
-            else()
-                set(INST_TARGETS ${GPU_TARGETS})
-            endif()
+            set(INST_TARGETS ${SUPPORTED_GPU_TARGETS})
             if(source MATCHES "_xdl")
                 list(REMOVE_ITEM INST_TARGETS gfx900 gfx906 gfx1030 gfx1100 gfx1101 gfx1102 gfx1103 gfx1200 gfx1201)
             elseif(ARGN MATCHES "_wmma")
                 list(REMOVE_ITEM INST_TARGETS gfx900 gfx906 gfx908 gfx90a gfx940 gfx941 gfx942 gfx1030)
             elseif(ARGN MATCHES "mha")
-                list(REMOVE_ITEM INST_TARGETS gfx900 gfx906 gfx908 gfx90a gfx1030 gfx1100 gfx1101 gfx1102 gfx1103 gfx1200 gfx1201)
+                list(REMOVE_ITEM INST_TARGETS gfx900 gfx906 gfx908 gfx1030 gfx1100 gfx1101 gfx1102 gfx1103 gfx1200 gfx1201)
             endif()
             set(offload_targets)
             foreach(target IN LISTS INST_TARGETS)
@@ -102,12 +94,14 @@ function(add_instance_library INSTANCE_NAME)
             set(FMHA_FWD_FAST_EXP2 true)
         endif()
         if(FMHA_FWD_FAST_EXP2)
-            list(APPEND EXAMPLE_FMHA_FWD_COMPILE_OPTIONS -Wno-undefined-func-template -DCK_TILE_FMHA_FWD_FAST_EXP2=1 -fgpu-flush-denormals-to-zero)
+            list(APPEND FMHA_COMPILE_OPTIONS -Wno-undefined-func-template -DCK_TILE_FMHA_FWD_FAST_EXP2=1 -fgpu-flush-denormals-to-zero)
         else()
-            list(APPEND EXAMPLE_FMHA_FWD_COMPILE_OPTIONS -Wno-undefined-func-template -DCK_TILE_FMHA_FWD_FAST_EXP2=0)
+            list(APPEND FMHA_COMPILE_OPTIONS -Wno-undefined-func-template -DCK_TILE_FMHA_FWD_FAST_EXP2=0)
         endif()
-        list(APPEND EXAMPLE_FMHA_FWD_COMPILE_OPTIONS -Wno-float-equal)
-        target_compile_options(device_mha_instance PRIVATE ${EXAMPLE_FMHA_FWD_COMPILE_OPTIONS})
+        list(APPEND FMHA_COMPILE_OPTIONS -Wno-float-equal)
+        list(APPEND FMHA_COMPILE_OPTIONS -DCK_TILE_FMHA_FWD_SPLITKV_API=1)
+        list(APPEND FMHA_COMPILE_OPTIONS -DCK_TILE_FMHA_FWD_APPENDKV_API=1)
+        target_compile_options(device_mha_instance PRIVATE ${FMHA_COMPILE_OPTIONS})
     endif()
     target_compile_features(${INSTANCE_NAME} PUBLIC)
@@ -189,12 +183,7 @@ FOREACH(subdir_path ${dir_list})
         set(add_inst 1)
     endif()
-    if(INSTANCES_ONLY)
-        set(INST_TARGETS ${DEFAULT_GPU_TARGETS})
-    else()
-        set(INST_TARGETS ${GPU_TARGETS})
-    endif()
+    set(INST_TARGETS ${SUPPORTED_GPU_TARGETS})
     if(("${cmake_instance}" MATCHES "quantization") AND (DEFINED DTYPES) AND (NOT DTYPES MATCHES "int8"))
         message("quantization instances will not be built!")
@@ -318,8 +307,7 @@ if(CK_DEVICE_CONV_INSTANCES)
 endif()
 if(CK_DEVICE_MHA_INSTANCES)
     set(gpu_list ${INST_TARGETS})
-    list(FILTER gpu_list INCLUDE REGEX "^gfx94")
-    if(gpu_list)
+    if(gpu_list MATCHES "gfx94" OR gpu_list MATCHES "gfx90a")
         add_library(device_mha_operations STATIC ${CK_DEVICE_MHA_INSTANCES})
         add_library(composablekernels::device_mha_operations ALIAS device_mha_operations)
         target_compile_features(device_mha_operations PUBLIC)
library/src/tensor_operation_instance/gpu/mha/CMakeLists.txt

@@ -32,23 +32,33 @@ if(EXISTS ${FMHA_CPP_FOLDER}/blob_list.txt)
     file(REMOVE ${FMHA_CPP_FOLDER}/blob_list.txt)
 endif()
+set(FMHA_KNOWN_APIS "fwd,fwd_splitkv,fwd_appendkv,bwd")
 # generate a list of kernels, but not actually emit files at config stage
+# Note: The receipt 3 arg filters the generated backwards instances to reduce compilation time.
+# With receipt 3 set, we are generating instances for datatype == {fp16 || bfp16}, bias == {no || alibi}, deterministic == off, and dpad == dvpad.
 execute_process(
-    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/example/ck_tile/01_fmha/generate.py
+    COMMAND ${PYTHON_EXECUTABLE} ${FMHA_SRC_FOLDER}/generate.py
         --list_blobs ${FMHA_CPP_FOLDER}/blob_list.txt
         --api ${FMHA_KNOWN_APIS}
         --receipt 3
     RESULT_VARIABLE ret
 )
 if(ret AND NOT ret EQUAL 0)
     message(FATAL_ERROR "CK Tile MHA FAILED to genrate a list of kernels via Python.")
 else()
-    file(STRINGS ${FMHA_CPP_FOLDER}/blob_list.txt FMHA_FWD_GEN_BLOBS)
+    file(STRINGS ${FMHA_CPP_FOLDER}/blob_list.txt FMHA_GEN_BLOBS)
 endif()
 # actually generate the kernel content now
+# Note: The receipt 3 arg filters the generated backwards instances to reduce compilation time.
+# With receipt 3 set, we are generating instances for datatype == {fp16 || bfp16}, bias == {no || alibi}, deterministic == off, and dpad == dvpad.
 add_custom_command(
-    OUTPUT ${FMHA_FWD_GEN_BLOBS}
-    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/example/ck_tile/01_fmha/generate.py
+    OUTPUT ${FMHA_GEN_BLOBS}
+    COMMAND ${PYTHON_EXECUTABLE} ${FMHA_SRC_FOLDER}/generate.py
         --output_dir ${FMHA_CPP_FOLDER}
         --api ${FMHA_KNOWN_APIS}
         --receipt 3
     COMMENT "Generating mha kernel (cpp) files now ..."
     VERBATIM
 )
@@ -57,12 +67,12 @@ add_custom_command(
 # have filename. Since, it was cauing the cmake
 # to throw "File name too long"
 set(device_files)
-foreach(filepath IN LISTS FMHA_FWD_GEN_BLOBS)
+foreach(filepath IN LISTS FMHA_GEN_BLOBS)
     get_filename_component(filename ${filepath} NAME)
     # Append the filename to the device_files list
     list(APPEND device_files ${filename})
 endforeach()

-add_custom_target(generate_cpp_files DEPENDS ${FMHA_FWD_GEN_BLOBS})
+add_custom_target(generate_cpp_files DEPENDS ${FMHA_GEN_BLOBS})

 add_instance_library(device_mha_instance ${device_files})
profiler/src/CMakeLists.txt

@@ -24,7 +24,7 @@ set(PROFILER_SOURCES
     profile_permute_scale.cpp
 )
-if(GPU_TARGETS MATCHES "gfx9")
+if(SUPPORTED_GPU_TARGETS MATCHES "gfx9")
     if(DTYPES MATCHES "fp32" OR DTYPES MATCHES "fp64" OR NOT DEFINED DTYPES)
         list(APPEND PROFILER_SOURCES profile_contraction_bilinear.cpp)
         list(APPEND PROFILER_SOURCES profile_contraction_scale.cpp)
@@ -49,7 +49,7 @@ if(GPU_TARGETS MATCHES "gfx9")
         list(APPEND PROFILER_SOURCES profile_grouped_gemm_multiply_tile_loop.cpp)
     endif()
     list(APPEND PROFILER_SOURCES profile_gemm_multiply_add.cpp)
-    if(GPU_TARGETS MATCHES "gfx94")
+    if(SUPPORTED_GPU_TARGETS MATCHES "gfx94")
         list(APPEND PROFILER_SOURCES profile_gemm_multiply_multiply.cpp)
         list(APPEND PROFILER_SOURCES profile_gemm_ab_scale.cpp)
     endif()
@@ -69,7 +69,7 @@ if(GPU_TARGETS MATCHES "gfx9")
 endif()
-if(GPU_TARGETS MATCHES "gfx11" OR GPU_TARGETS MATCHES "gfx12" OR GPU_TARGETS MATCHES "gfx9")
+if(SUPPORTED_GPU_TARGETS MATCHES "gfx11" OR SUPPORTED_GPU_TARGETS MATCHES "gfx12" OR SUPPORTED_GPU_TARGETS MATCHES "gfx9")
     if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
         list(APPEND PROFILER_SOURCES profile_gemm_bilinear.cpp)
     endif()
@@ -111,7 +111,7 @@ target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_column_to_image_inst
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_transpose_instance)
 target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_permute_scale_instance)
-if(GPU_TARGETS MATCHES "gfx9")
+if(SUPPORTED_GPU_TARGETS MATCHES "gfx9")
     if(DTYPES MATCHES "fp32" OR DTYPES MATCHES "fp64" OR NOT DEFINED DTYPES)
         target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_contraction_bilinear_instance)
         target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_contraction_scale_instance)
@@ -135,7 +135,7 @@ if(GPU_TARGETS MATCHES "gfx9")
     target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_instance)
     target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_batched_gemm_reduce_instance)
     target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_multiply_add_instance)
-    if(GPU_TARGETS MATCHES "gfx94")
+    if(SUPPORTED_GPU_TARGETS MATCHES "gfx94")
         target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_multiply_multiply_instance)
         target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_ab_scale_instance)
     endif()
@@ -159,7 +159,7 @@ if(GPU_TARGETS MATCHES "gfx9")
     target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_grouped_conv3d_fwd_convinvscale_instance)
 endif()
-if(GPU_TARGETS MATCHES "gfx9" OR GPU_TARGETS MATCHES "gfx11" OR GPU_TARGETS MATCHES "gfx12")
+if(SUPPORTED_GPU_TARGETS MATCHES "gfx9" OR SUPPORTED_GPU_TARGETS MATCHES "gfx11" OR SUPPORTED_GPU_TARGETS MATCHES "gfx12")
     if(DTYPES MATCHES "fp16" OR NOT DEFINED DTYPES)
         target_link_libraries(${PROFILER_EXECUTABLE} PRIVATE device_gemm_bilinear_instance)
     endif()
script/cmake-ck-dev.sh

@@ -7,8 +7,11 @@ MY_PROJECT_SOURCE=$1
 if [ $# -ge 2 ]; then
     GPU_TARGETS=$2
+    shift 2
+    REST_ARGS=$@
 else
     GPU_TARGETS="gfx908;gfx90a;gfx940"
+    REST_ARGS=
 fi
 cmake \
@@ -20,4 +23,5 @@ cmake
 -D GPU_TARGETS=$GPU_TARGETS \
 -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
 -D USE_BITINT_EXTENSION_INT4=OFF \
+$REST_ARGS \
 ${MY_PROJECT_SOURCE}
script/cmake-ck-release.sh

@@ -7,8 +7,11 @@ MY_PROJECT_SOURCE=$1
 if [ $# -ge 2 ]; then
     GPU_TARGETS=$2
+    shift 2
+    REST_ARGS=$@
 else
     GPU_TARGETS="gfx908;gfx90a;gfx940"
+    REST_ARGS=
 fi
 cmake \
@@ -20,5 +23,6 @@ cmake
 -D GPU_TARGETS=$GPU_TARGETS \
 -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
 -D USE_BITINT_EXTENSION_INT4=OFF \
+$REST_ARGS \
 ${MY_PROJECT_SOURCE}
test/CMakeLists.txt

@@ -41,11 +41,7 @@ function(add_test_executable TEST_NAME)
         endforeach()
     endif()
-    if(INSTANCES_ONLY)
-        set(TEST_TARGETS ${DEFAULT_GPU_TARGETS})
-    else()
-        set(TEST_TARGETS ${GPU_TARGETS})
-    endif()
+    set(TEST_TARGETS ${SUPPORTED_GPU_TARGETS})
     foreach(source IN LISTS ARGN)
         if(NOT DEFINED DL_KERNELS AND source MATCHES "_dl")
@@ -122,11 +118,7 @@ function(add_gtest_executable TEST_NAME)
         endforeach()
     endif()
-    if(INSTANCES_ONLY)
-        set(TEST_TARGETS ${DEFAULT_GPU_TARGETS})
-    else()
-        set(TEST_TARGETS ${GPU_TARGETS})
-    endif()
+    set(TEST_TARGETS ${SUPPORTED_GPU_TARGETS})
     foreach(source IN LISTS ARGN)
         if(NOT DEFINED DL_KERNELS AND source MATCHES "_dl")
@@ -173,6 +165,7 @@ function(add_gtest_executable TEST_NAME)
 endfunction()

 add_compile_options(-Wno-c++20-extensions)
+add_subdirectory(ck_tile)
 add_subdirectory(magic_number_division)
 add_subdirectory(space_filling_curve)
 add_subdirectory(conv_util)
@@ -210,10 +203,10 @@ add_subdirectory(conv_tensor_rearrange)
 add_subdirectory(transpose)
 add_subdirectory(permute_scale)
 add_subdirectory(wrapper)
-if(GPU_TARGETS MATCHES "gfx11")
+if(SUPPORTED_GPU_TARGETS MATCHES "gfx11")
     add_subdirectory(wmma_op)
 endif()
-if(GPU_TARGETS MATCHES "gfx942" AND CK_HIP_VERSION_MAJOR GREATER_EQUAL 6 AND CK_HIP_VERSION_MINOR GREATER_EQUAL 2) # smfmac needs ROCm6.2
+if(SUPPORTED_GPU_TARGETS MATCHES "gfx942" AND CK_HIP_VERSION_MAJOR GREATER_EQUAL 6 AND CK_HIP_VERSION_MINOR GREATER_EQUAL 2) # smfmac needs ROCm6.2
     add_subdirectory(smfmac_op)
 endif()
 add_subdirectory(position_embedding)
test/ck_tile/CMakeLists.txt (new file, mode 100644)

add_subdirectory(image_to_column)
test/ck_tile/image_to_column/CMakeLists.txt (new file, mode 100644)

# Currently ck_tile is only built on gfx9
if(GPU_TARGETS MATCHES "gfx9")
    add_gtest_executable(test_tile_image_to_column test_tile_image_to_column.cpp)
endif()
test/ck_tile/image_to_column/test_tile_image_to_column.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include <algorithm>

#include <gtest/gtest.h>

#include "ck_tile/host.hpp"
#include "ck_tile/core.hpp"
#include "ck_tile/host/kernel_launch.hpp"
#include "ck_tile/ops/image_to_column.hpp"

// Host API implementation
template <typename DataType>
class TestCkTileImageToColumn : public ::testing::Test
{
    static constexpr ck_tile::index_t VectorSize  = 1;
    static constexpr ck_tile::index_t NDimSpatial = 2;

    protected:
    void Run(const ck_tile::conv::ConvParam conv_params)
    {
        using ImLayout = ck_tile::tensor_layout::convolution::NHWGC;

        const auto G = conv_params.G_;
        const auto N = conv_params.N_;
        const auto C = conv_params.C_;

        const ck_tile::long_index_t NDoHoWo =
            N * std::accumulate(conv_params.output_spatial_lengths_.begin(),
                                std::next(conv_params.output_spatial_lengths_.begin(), NDimSpatial),
                                1,
                                std::multiplies<>());
        const ck_tile::long_index_t CZYX =
            C * std::accumulate(conv_params.filter_spatial_lengths_.begin(),
                                std::next(conv_params.filter_spatial_lengths_.begin(), NDimSpatial),
                                1,
                                std::multiplies<>());

        const auto in_desc =
            ck_tile::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<ImLayout>(conv_params);
        const auto out_desc = ck_tile::HostTensorDescriptor({G, NDoHoWo, CZYX});

        // host verify
        ck_tile::HostTensor<DataType> in(in_desc);
        ck_tile::HostTensor<DataType> out_device(out_desc);
        ck_tile::HostTensor<DataType> out_host(out_desc);

        std::cout << "input: " << in.mDesc << std::endl;
        std::cout << "output: " << out_device.mDesc << std::endl;

        ck_tile::FillUniformDistributionIntegerValue<DataType>{-5.f, 5.f}(in);

        ck_tile::DeviceMem in_device_buf(in.get_element_space_size_in_bytes());
        ck_tile::DeviceMem out_device_buf(out_device.get_element_space_size_in_bytes());

        in_device_buf.ToDevice(in.data());

        using thread_tile = ck_tile::sequence<4, 4>;
        using warp_tile   = ck_tile::sequence<8, 128>;
        using block_tile  = ck_tile::sequence<32, 128>;

        using Shape = ck_tile::TileImageToColumnShape<thread_tile, warp_tile, block_tile>;

        using PipelineProblem = ck_tile::
            BlockImageToColumnProblem<DataType, DataType, Shape, NDimSpatial, VectorSize, VectorSize>;

        using Kernel = ck_tile::ImageToColumn<PipelineProblem>;

        auto kargs = Kernel::MakeKargs(
            in_device_buf.GetDeviceBuffer(),
            out_device_buf.GetDeviceBuffer(),
            G,
            N,
            C,
            ck_tile::to_array<ck_tile::long_index_t, NDimSpatial>(conv_params.input_spatial_lengths_),
            ck_tile::to_array<ck_tile::long_index_t, NDimSpatial>(conv_params.filter_spatial_lengths_),
            ck_tile::to_array<ck_tile::long_index_t, NDimSpatial>(conv_params.output_spatial_lengths_),
            ck_tile::to_array<ck_tile::long_index_t, NDimSpatial + 3>(in_desc.get_strides()),
            ck_tile::to_array<ck_tile::long_index_t, 3>(out_desc.get_strides()),
            ck_tile::to_array<ck_tile::long_index_t, NDimSpatial>(conv_params.conv_filter_strides_),
            ck_tile::to_array<ck_tile::long_index_t, NDimSpatial>(conv_params.conv_filter_dilations_),
            ck_tile::to_array<ck_tile::long_index_t, NDimSpatial>(conv_params.input_left_pads_),
            ck_tile::to_array<ck_tile::long_index_t, NDimSpatial>(conv_params.input_right_pads_));

        const dim3 grids = Kernel::GridSize(
            kargs.N * kargs.output_spatial_lengths[0] * kargs.output_spatial_lengths[1],
            kargs.filter_spatial_lengths[0] * kargs.filter_spatial_lengths[1] * kargs.C,
            kargs.G);
        constexpr dim3 blocks = Kernel::BlockSize();

        constexpr ck_tile::index_t kBlockPerCu = 2;

        ck_tile::launch_kernel(
            ck_tile::stream_config{},
            ck_tile::make_kernel<blocks.x, kBlockPerCu>(Kernel{}, grids, blocks, 0, kargs));

        // reference
        ck_tile::reference_im2col<DataType, DataType, NDimSpatial>(in, out_host, conv_params);

        out_device_buf.FromDevice(out_device.data());

        bool pass = ck_tile::check_err(out_device, out_host);
        EXPECT_TRUE(pass);
    }
};

class TestCkTileImageToColumnFloat : public TestCkTileImageToColumn<float>
{
};

class TestCkTileImageToColumnHalf : public TestCkTileImageToColumn<ck_tile::half_t>
{
};

TEST_F(TestCkTileImageToColumnFloat, TestCorrectness)
{
    this->Run({2, 2, 4, 1, 192, {3, 3}, {28, 28}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    this->Run({2, 2, 64, 1, 64, {3, 3}, {14, 14}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    this->Run({2, 1, 64, 1, 64, {1, 1}, {7, 7}, {3, 3}, {1, 1}, {0, 0}, {0, 0}});
    this->Run({2, 1, 64, 1, 64, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}});
    this->Run({2, 2, 64, 1, 64, {3, 3}, {28, 28}, {2, 2}, {2, 2}, {1, 1}, {1, 1}});
}

TEST_F(TestCkTileImageToColumnHalf, TestCorrectness)
{
    this->Run({2, 2, 4, 1, 192, {3, 3}, {28, 28}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    this->Run({2, 2, 64, 1, 64, {3, 3}, {14, 14}, {1, 1}, {1, 1}, {1, 1}, {1, 1}});
    this->Run({2, 1, 64, 1, 64, {1, 1}, {7, 7}, {3, 3}, {1, 1}, {0, 0}, {0, 0}});
    this->Run({2, 1, 64, 1, 64, {1, 1}, {3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}});
    this->Run({2, 2, 64, 1, 64, {3, 3}, {28, 28}, {2, 2}, {2, 2}, {1, 1}, {1, 1}});
}
test/data_type/CMakeLists.txt

@@ -18,4 +18,9 @@ if(result EQUAL 0)
     target_link_libraries(test_bf8 PRIVATE utility)
 endif()
+
+add_gtest_executable(test_custom_type test_custom_type.cpp)
+if(result EQUAL 0)
+    target_link_libraries(test_custom_type PRIVATE utility)
+endif()
 add_gtest_executable(test_type_convert_const type_convert_const.cpp)
test/data_type/test_custom_type.cpp (new file, mode 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "gtest/gtest.h"

#include "ck/utility/data_type.hpp"
#include "ck/utility/type_convert.hpp"

using ck::bf8_t;
using ck::bhalf_t;
using ck::f8_t;
using ck::half_t;
using ck::Number;
using ck::type_convert;
using ck::vector_type;

TEST(Custom_bool, TestSize)
{
    struct custom_bool_t
    {
        bool data;
    };

    ASSERT_EQ(sizeof(custom_bool_t), sizeof(bool));
    ASSERT_EQ(sizeof(vector_type<custom_bool_t, 2>), sizeof(vector_type<bool, 2>));
    ASSERT_EQ(sizeof(vector_type<custom_bool_t, 4>), sizeof(vector_type<bool, 4>));
    ASSERT_EQ(sizeof(vector_type<custom_bool_t, 8>), sizeof(vector_type<bool, 8>));
    ASSERT_EQ(sizeof(vector_type<custom_bool_t, 16>), sizeof(vector_type<bool, 16>));
    ASSERT_EQ(sizeof(vector_type<custom_bool_t, 32>), sizeof(vector_type<bool, 32>));
    ASSERT_EQ(sizeof(vector_type<custom_bool_t, 64>), sizeof(vector_type<bool, 64>));
}

TEST(Custom_bool, TestAsType)
{
    struct custom_bool_t
    {
        using type = bool;
        type data;
        custom_bool_t() : data{type{}} {}
        custom_bool_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<bool> test_vec = {false, true, false, true};
    // reference vector
    vector_type<custom_bool_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_bool_t>()(Number<i>{}).data, false); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_bool_t>()(Number<i>{}) = custom_bool_t{test_vec.at(i)}; });
    // copy the vector
    vector_type<custom_bool_t, size> left_vec{right_vec};
    // check if values were copied correctly
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_bool_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_bool, TestAsTypeReshape)
{
    struct custom_bool_t
    {
        using type = bool;
        type data;
        custom_bool_t() : data{type{}} {}
        custom_bool_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<bool> test_vec = {false, true, false, true};
    // reference vector
    vector_type<custom_bool_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_bool_t>()(Number<i>{}).data, false); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_bool_t>()(Number<i>{}) = custom_bool_t{test_vec.at(i)}; });
    // copy the first half of a vector
    vector_type<custom_bool_t, size / 2> left_vec{
        right_vec.template AsType<vector_type<custom_bool_t, size / 2>::type>()(Number<0>{})};
    // check if values were copied correctly
    ck::static_for<0, size / 2, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_bool_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_int8, TestSize)
{
    struct custom_int8_t
    {
        int8_t data;
    };

    ASSERT_EQ(sizeof(custom_int8_t), sizeof(int8_t));
    ASSERT_EQ(sizeof(vector_type<custom_int8_t, 2>), sizeof(vector_type<int8_t, 2>));
    ASSERT_EQ(sizeof(vector_type<custom_int8_t, 4>), sizeof(vector_type<int8_t, 4>));
    ASSERT_EQ(sizeof(vector_type<custom_int8_t, 8>), sizeof(vector_type<int8_t, 8>));
    ASSERT_EQ(sizeof(vector_type<custom_int8_t, 16>), sizeof(vector_type<int8_t, 16>));
    ASSERT_EQ(sizeof(vector_type<custom_int8_t, 32>), sizeof(vector_type<int8_t, 32>));
    ASSERT_EQ(sizeof(vector_type<custom_int8_t, 64>), sizeof(vector_type<int8_t, 64>));
}

TEST(Custom_int8, TestAsType)
{
    struct custom_int8_t
    {
        using type = int8_t;
        type data;
        custom_int8_t() : data{type{}} {}
        custom_int8_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<int8_t> test_vec = {3, -6, 8, -2};
    // reference vector
    vector_type<custom_int8_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_int8_t>()(Number<i>{}).data, 0); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_int8_t>()(Number<i>{}) = custom_int8_t{test_vec.at(i)}; });
    // copy the vector
    vector_type<custom_int8_t, size> left_vec{right_vec};
    // check if values were copied correctly
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_int8_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_int8, TestAsTypeReshape)
{
    struct custom_int8_t
    {
        using type = int8_t;
        type data;
        custom_int8_t() : data{type{}} {}
        custom_int8_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<int8_t> test_vec = {3, -6, 8, -2};
    // reference vector
    vector_type<custom_int8_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_int8_t>()(Number<i>{}).data, 0); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_int8_t>()(Number<i>{}) = custom_int8_t{test_vec.at(i)}; });
    // copy the first half of a vector
    vector_type<custom_int8_t, size / 2> left_vec{
        right_vec.template AsType<vector_type<custom_int8_t, size / 2>::type>()(Number<0>{})};
    // check if values were copied correctly
    ck::static_for<0, size / 2, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_int8_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_uint8, TestSize)
{
    struct custom_uint8_t
    {
        uint8_t data;
    };

    ASSERT_EQ(sizeof(custom_uint8_t), sizeof(uint8_t));
    ASSERT_EQ(sizeof(vector_type<custom_uint8_t, 2>), sizeof(vector_type<uint8_t, 2>));
    ASSERT_EQ(sizeof(vector_type<custom_uint8_t, 4>), sizeof(vector_type<uint8_t, 4>));
    ASSERT_EQ(sizeof(vector_type<custom_uint8_t, 8>), sizeof(vector_type<uint8_t, 8>));
    ASSERT_EQ(sizeof(vector_type<custom_uint8_t, 16>), sizeof(vector_type<uint8_t, 16>));
    ASSERT_EQ(sizeof(vector_type<custom_uint8_t, 32>), sizeof(vector_type<uint8_t, 32>));
    ASSERT_EQ(sizeof(vector_type<custom_uint8_t, 64>), sizeof(vector_type<uint8_t, 64>));
}

TEST(Custom_uint8, TestAsType)
{
    struct custom_uint8_t
    {
        using type = uint8_t;
        type data;
        custom_uint8_t() : data{type{}} {}
        custom_uint8_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<uint8_t> test_vec = {3, 6, 8, 2};
    // reference vector
    vector_type<custom_uint8_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_uint8_t>()(Number<i>{}).data, 0); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_uint8_t>()(Number<i>{}) = custom_uint8_t{test_vec.at(i)}; });
    // copy the vector
    vector_type<custom_uint8_t, size> left_vec{right_vec};
    // check if values were copied correctly
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_uint8_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_uint8, TestAsTypeReshape)
{
    struct custom_uint8_t
    {
        using type = uint8_t;
        type data;
        custom_uint8_t() : data{type{}} {}
        custom_uint8_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<uint8_t> test_vec = {3, 6, 8, 2};
    // reference vector
    vector_type<custom_uint8_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_uint8_t>()(Number<i>{}).data, 0); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_uint8_t>()(Number<i>{}) = custom_uint8_t{test_vec.at(i)}; });
    // copy the first half of a vector
    vector_type<custom_uint8_t, size / 2> left_vec{
        right_vec.template AsType<vector_type<custom_uint8_t, size / 2>::type>()(Number<0>{})};
    // check if values were copied correctly
    ck::static_for<0, size / 2, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_uint8_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_f8, TestSize)
{
    struct custom_f8_t
    {
        _BitInt(8) data;
    };

    ASSERT_EQ(sizeof(custom_f8_t), sizeof(_BitInt(8)));
    ASSERT_EQ(sizeof(vector_type<custom_f8_t, 2>), sizeof(vector_type<_BitInt(8), 2>));
    ASSERT_EQ(sizeof(vector_type<custom_f8_t, 4>), sizeof(vector_type<_BitInt(8), 4>));
    ASSERT_EQ(sizeof(vector_type<custom_f8_t, 8>), sizeof(vector_type<_BitInt(8), 8>));
    ASSERT_EQ(sizeof(vector_type<custom_f8_t, 16>), sizeof(vector_type<_BitInt(8), 16>));
    ASSERT_EQ(sizeof(vector_type<custom_f8_t, 32>), sizeof(vector_type<_BitInt(8), 32>));
    ASSERT_EQ(sizeof(vector_type<custom_f8_t, 64>), sizeof(vector_type<_BitInt(8), 64>));
}

TEST(Custom_f8, TestAsType)
{
    struct custom_f8_t
    {
        using type = _BitInt(8);
        type data;
        custom_f8_t() : data{type{}} {}
        custom_f8_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<_BitInt(8)> test_vec = {type_convert<_BitInt(8)>(0.3f),
                                        type_convert<_BitInt(8)>(-0.6f),
                                        type_convert<_BitInt(8)>(0.8f),
                                        type_convert<_BitInt(8)>(-0.2f)};
    // reference vector
    vector_type<custom_f8_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_f8_t>()(Number<i>{}).data, 0); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_f8_t>()(Number<i>{}) = custom_f8_t{test_vec.at(i)}; });
    // copy the vector
    vector_type<custom_f8_t, size> left_vec{right_vec};
    // check if values were copied correctly
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_f8_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_f8, TestAsTypeReshape)
{
    struct custom_f8_t
    {
        using type = _BitInt(8);
        type data;
        custom_f8_t() : data{type{}} {}
        custom_f8_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<_BitInt(8)> test_vec = {type_convert<_BitInt(8)>(0.3f),
                                        type_convert<_BitInt(8)>(-0.6f),
                                        type_convert<_BitInt(8)>(0.8f),
                                        type_convert<_BitInt(8)>(-0.2f)};
    // reference vector
    vector_type<custom_f8_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_f8_t>()(Number<i>{}).data, 0); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_f8_t>()(Number<i>{}) = custom_f8_t{test_vec.at(i)}; });
    // copy the first half of a vector
    vector_type<custom_f8_t, size / 2> left_vec{
        right_vec.template AsType<vector_type<custom_f8_t, size / 2>::type>()(Number<0>{})};
    // check if values were copied correctly
    ck::static_for<0, size / 2, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_f8_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_bf8, TestSize)
{
    struct custom_bf8_t
    {
        unsigned _BitInt(8) data;
    };

    ASSERT_EQ(sizeof(custom_bf8_t), sizeof(unsigned _BitInt(8)));
    ASSERT_EQ(sizeof(vector_type<custom_bf8_t, 2>), sizeof(vector_type<unsigned _BitInt(8), 2>));
    ASSERT_EQ(sizeof(vector_type<custom_bf8_t, 4>), sizeof(vector_type<unsigned _BitInt(8), 4>));
    ASSERT_EQ(sizeof(vector_type<custom_bf8_t, 8>), sizeof(vector_type<unsigned _BitInt(8), 8>));
    ASSERT_EQ(sizeof(vector_type<custom_bf8_t, 16>), sizeof(vector_type<unsigned _BitInt(8), 16>));
    ASSERT_EQ(sizeof(vector_type<custom_bf8_t, 32>), sizeof(vector_type<unsigned _BitInt(8), 32>));
    ASSERT_EQ(sizeof(vector_type<custom_bf8_t, 64>), sizeof(vector_type<unsigned _BitInt(8), 64>));
}

TEST(Custom_bf8, TestAsType)
{
    struct custom_bf8_t
    {
        using type = unsigned _BitInt(8);
        type data;
        custom_bf8_t() : data{type{}} {}
        custom_bf8_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<unsigned _BitInt(8)> test_vec = {type_convert<unsigned _BitInt(8)>(0.3f),
                                                 type_convert<unsigned _BitInt(8)>(-0.6f),
                                                 type_convert<unsigned _BitInt(8)>(0.8f),
                                                 type_convert<unsigned _BitInt(8)>(-0.2f)};
    // reference vector
    vector_type<custom_bf8_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_bf8_t>()(Number<i>{}).data, 0); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_bf8_t>()(Number<i>{}) = custom_bf8_t{test_vec.at(i)}; });
    // copy the vector
    vector_type<custom_bf8_t, size> left_vec{right_vec};
    // check if values were copied correctly
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_bf8_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_bf8, TestAsTypeReshape)
{
    struct custom_bf8_t
    {
        using type = unsigned _BitInt(8);
        type data;
        custom_bf8_t() : data{type{}} {}
        custom_bf8_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<unsigned _BitInt(8)> test_vec = {type_convert<unsigned _BitInt(8)>(0.3f),
                                                 type_convert<unsigned _BitInt(8)>(-0.6f),
                                                 type_convert<unsigned _BitInt(8)>(0.8f),
                                                 type_convert<unsigned _BitInt(8)>(-0.2f)};
    // reference vector
    vector_type<custom_bf8_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_bf8_t>()(Number<i>{}).data, 0); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_bf8_t>()(Number<i>{}) = custom_bf8_t{test_vec.at(i)}; });
    // copy the first half of a vector
    vector_type<custom_bf8_t, size / 2> left_vec{
        right_vec.template AsType<vector_type<custom_bf8_t, size / 2>::type>()(Number<0>{})};
    // check if values were copied correctly
    ck::static_for<0, size / 2, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_bf8_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_half, TestSize)
{
    struct custom_half_t
    {
        half_t data;
    };

    ASSERT_EQ(sizeof(custom_half_t), sizeof(half_t));
    ASSERT_EQ(sizeof(vector_type<custom_half_t, 2>), sizeof(vector_type<half_t, 2>));
    ASSERT_EQ(sizeof(vector_type<custom_half_t, 4>), sizeof(vector_type<half_t, 4>));
    ASSERT_EQ(sizeof(vector_type<custom_half_t, 8>), sizeof(vector_type<half_t, 8>));
    ASSERT_EQ(sizeof(vector_type<custom_half_t, 16>), sizeof(vector_type<half_t, 16>));
    ASSERT_EQ(sizeof(vector_type<custom_half_t, 32>), sizeof(vector_type<half_t, 32>));
    ASSERT_EQ(sizeof(vector_type<custom_half_t, 64>), sizeof(vector_type<half_t, 64>));
}

TEST(Custom_half, TestAsType)
{
    struct custom_half_t
    {
        using type = half_t;
        type data;
        custom_half_t() : data{type{}} {}
        custom_half_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<half_t> test_vec = {half_t{0.3f}, half_t{-0.6f}, half_t{0.8f}, half_t{-0.2f}};
    // reference vector
    vector_type<custom_half_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}([&](auto i) {
        ASSERT_EQ(right_vec.template AsType<custom_half_t>()(Number<i>{}).data, type_convert<half_t>(0.0f));
    });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_half_t>()(Number<i>{}) = custom_half_t{test_vec.at(i)}; });
    // copy the vector
    vector_type<custom_half_t, size> left_vec{right_vec};
    // check if values were copied correctly
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_half_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_half, TestAsTypeReshape)
{
    struct custom_half_t
    {
        using type = half_t;
        type data;
        custom_half_t() : data{type{}} {}
        custom_half_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<half_t> test_vec = {half_t{0.3f}, half_t{-0.6f}, half_t{0.8f}, half_t{-0.2f}};
    // reference vector
    vector_type<custom_half_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}([&](auto i) {
        ASSERT_EQ(right_vec.template AsType<custom_half_t>()(Number<i>{}).data, type_convert<half_t>(0.0f));
    });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_half_t>()(Number<i>{}) = custom_half_t{test_vec.at(i)}; });
    // copy the first half of a vector
    vector_type<custom_half_t, size / 2> left_vec{
        right_vec.template AsType<vector_type<custom_half_t, size / 2>::type>()(Number<0>{})};
    // check if values were copied correctly
    ck::static_for<0, size / 2, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_half_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_bhalf, TestSize)
{
    struct custom_bhalf_t
    {
        bhalf_t data;
    };

    ASSERT_EQ(sizeof(custom_bhalf_t), sizeof(bhalf_t));
    ASSERT_EQ(sizeof(vector_type<custom_bhalf_t, 2>), sizeof(vector_type<bhalf_t, 2>));
    ASSERT_EQ(sizeof(vector_type<custom_bhalf_t, 4>), sizeof(vector_type<bhalf_t, 4>));
    ASSERT_EQ(sizeof(vector_type<custom_bhalf_t, 8>), sizeof(vector_type<bhalf_t, 8>));
    ASSERT_EQ(sizeof(vector_type<custom_bhalf_t, 16>), sizeof(vector_type<bhalf_t, 16>));
    ASSERT_EQ(sizeof(vector_type<custom_bhalf_t, 32>), sizeof(vector_type<bhalf_t, 32>));
    ASSERT_EQ(sizeof(vector_type<custom_bhalf_t, 64>), sizeof(vector_type<bhalf_t, 64>));
}

TEST(Custom_bhalf, TestAsType)
{
    struct custom_bhalf_t
    {
        using type = bhalf_t;
        type data;
        custom_bhalf_t() : data{type{}} {}
        custom_bhalf_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<bhalf_t> test_vec = {type_convert<bhalf_t>(0.3f),
                                     type_convert<bhalf_t>(-0.6f),
                                     type_convert<bhalf_t>(0.8f),
                                     type_convert<bhalf_t>(-0.2f)};
    // reference vector
    vector_type<custom_bhalf_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}([&](auto i) {
        ASSERT_EQ(right_vec.template AsType<custom_bhalf_t>()(Number<i>{}).data, type_convert<bhalf_t>(0.0f));
    });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_bhalf_t>()(Number<i>{}) = custom_bhalf_t{test_vec.at(i)}; });
    // copy the vector
    vector_type<custom_bhalf_t, size> left_vec{right_vec};
    // check if values were copied correctly
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_bhalf_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_bhalf, TestAsTypeReshape)
{
    struct custom_bhalf_t
    {
        using type = bhalf_t;
        type data;
        custom_bhalf_t() : data{type{}} {}
        custom_bhalf_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<bhalf_t> test_vec = {type_convert<bhalf_t>(0.3f),
                                     type_convert<bhalf_t>(-0.6f),
                                     type_convert<bhalf_t>(0.8f),
                                     type_convert<bhalf_t>(-0.2f)};
    // reference vector
    vector_type<custom_bhalf_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}([&](auto i) {
        ASSERT_EQ(right_vec.template AsType<custom_bhalf_t>()(Number<i>{}).data, type_convert<bhalf_t>(0.0f));
    });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_bhalf_t>()(Number<i>{}) = custom_bhalf_t{test_vec.at(i)}; });
    // copy the first half of a vector
    vector_type<custom_bhalf_t, size / 2> left_vec{
        right_vec.template AsType<vector_type<custom_bhalf_t, size / 2>::type>()(Number<0>{})};
    // check if values were copied correctly
    ck::static_for<0, size / 2, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_bhalf_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_float, TestSize)
{
    struct custom_float_t
    {
        float data;
    };

    ASSERT_EQ(sizeof(custom_float_t), sizeof(float));
    ASSERT_EQ(sizeof(vector_type<custom_float_t, 2>), sizeof(vector_type<float, 2>));
    ASSERT_EQ(sizeof(vector_type<custom_float_t, 4>), sizeof(vector_type<float, 4>));
    ASSERT_EQ(sizeof(vector_type<custom_float_t, 8>), sizeof(vector_type<float, 8>));
    ASSERT_EQ(sizeof(vector_type<custom_float_t, 16>), sizeof(vector_type<float, 16>));
    ASSERT_EQ(sizeof(vector_type<custom_float_t, 32>), sizeof(vector_type<float, 32>));
    ASSERT_EQ(sizeof(vector_type<custom_float_t, 64>), sizeof(vector_type<float, 64>));
}

TEST(Custom_float, TestAsType)
{
    struct custom_float_t
    {
        using type = float;
        type data;
        custom_float_t() : data{type{}} {}
        custom_float_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<float> test_vec = {0.3f, -0.6f, 0.8f, -0.2f};
    // reference vector
    vector_type<custom_float_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_float_t>()(Number<i>{}).data, 0.0f); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_float_t>()(Number<i>{}) = custom_float_t{test_vec.at(i)}; });
    // copy the vector
    vector_type<custom_float_t, size> left_vec{right_vec};
    // check if values were copied correctly
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_float_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_float, TestAsTypeReshape)
{
    struct custom_float_t
    {
        using type = float;
        type data;
        custom_float_t() : data{type{}} {}
        custom_float_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<float> test_vec = {0.3f, -0.6f, 0.8f, -0.2f};
    // reference vector
    vector_type<custom_float_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_float_t>()(Number<i>{}).data, 0.0f); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_float_t>()(Number<i>{}) = custom_float_t{test_vec.at(i)}; });
    // copy the first half of a vector
    vector_type<custom_float_t, size / 2> left_vec{
        right_vec.template AsType<vector_type<custom_float_t, size / 2>::type>()(Number<0>{})};
    // check if values were copied correctly
    ck::static_for<0, size / 2, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_float_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_double, TestSize)
{
    struct custom_double_t
    {
        double data;
    };

    ASSERT_EQ(sizeof(custom_double_t), sizeof(double));
    ASSERT_EQ(sizeof(vector_type<custom_double_t, 2>), sizeof(vector_type<double, 2>));
    ASSERT_EQ(sizeof(vector_type<custom_double_t, 4>), sizeof(vector_type<double, 4>));
    ASSERT_EQ(sizeof(vector_type<custom_double_t, 8>), sizeof(vector_type<double, 8>));
    ASSERT_EQ(sizeof(vector_type<custom_double_t, 16>), sizeof(vector_type<double, 16>));
    ASSERT_EQ(sizeof(vector_type<custom_double_t, 32>), sizeof(vector_type<double, 32>));
    ASSERT_EQ(sizeof(vector_type<custom_double_t, 64>), sizeof(vector_type<double, 64>));
}

TEST(Custom_double, TestAsType)
{
    struct custom_double_t
    {
        using type = double;
        type data;
        custom_double_t() : data{type{}} {}
        custom_double_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<double> test_vec = {0.3, 0.6, 0.8, 0.2};
    // reference vector
    vector_type<custom_double_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_double_t>()(Number<i>{}).data, 0.0); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_double_t>()(Number<i>{}) = custom_double_t{test_vec.at(i)}; });
    // copy the vector
    vector_type<custom_double_t, size> left_vec{right_vec};
    // check if values were copied correctly
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_double_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Custom_double, TestAsTypeReshape)
{
    struct custom_double_t
    {
        using type = double;
        type data;
        custom_double_t() : data{type{}} {}
        custom_double_t(type init) : data{init} {}
    };

    // test size
    const int size = 4;
    std::vector<double> test_vec = {0.3, 0.6, 0.8, 0.2};
    // reference vector
    vector_type<custom_double_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}(
        [&](auto i) { ASSERT_EQ(right_vec.template AsType<custom_double_t>()(Number<i>{}).data, 0.0); });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}(
        [&](auto i) { right_vec.template AsType<custom_double_t>()(Number<i>{}) = custom_double_t{test_vec.at(i)}; });
    // copy the first half of a vector
    vector_type<custom_double_t, size / 2> left_vec{
        right_vec.template AsType<vector_type<custom_double_t, size / 2>::type>()(Number<0>{})};
    // check if values were copied correctly
    ck::static_for<0, size / 2, 1>{}(
        [&](auto i) { ASSERT_EQ(left_vec.template AsType<custom_double_t>()(Number<i>{}).data, test_vec.at(i)); });
}

TEST(Complex_half, TestSize)
{
    struct complex_half_t
    {
        half_t real;
        half_t img;
    };

    ASSERT_EQ(sizeof(complex_half_t), sizeof(half_t) + sizeof(half_t));
    ASSERT_EQ(sizeof(vector_type<complex_half_t, 2>),
              sizeof(vector_type<half_t, 2>) + sizeof(vector_type<half_t, 2>));
    ASSERT_EQ(sizeof(vector_type<complex_half_t, 4>),
              sizeof(vector_type<half_t, 4>) + sizeof(vector_type<half_t, 4>));
    ASSERT_EQ(sizeof(vector_type<complex_half_t, 8>),
              sizeof(vector_type<half_t, 8>) + sizeof(vector_type<half_t, 8>));
    ASSERT_EQ(sizeof(vector_type<complex_half_t, 16>),
              sizeof(vector_type<half_t, 16>) + sizeof(vector_type<half_t, 16>));
    ASSERT_EQ(sizeof(vector_type<complex_half_t, 32>),
              sizeof(vector_type<half_t, 32>) + sizeof(vector_type<half_t, 32>));
    ASSERT_EQ(sizeof(vector_type<complex_half_t, 64>),
              sizeof(vector_type<half_t, 64>) + sizeof(vector_type<half_t, 64>));
}

TEST(Complex_half, TestAlignment)
{
    struct complex_half_t
    {
        half_t real;
        half_t img;
    };

    ASSERT_EQ(alignof(vector_type<complex_half_t, 2>),
              alignof(vector_type<half_t, 2>) + alignof(vector_type<half_t, 2>));
    ASSERT_EQ(alignof(vector_type<complex_half_t, 4>),
              alignof(vector_type<half_t, 4>) + alignof(vector_type<half_t, 4>));
    ASSERT_EQ(alignof(vector_type<complex_half_t, 8>),
              alignof(vector_type<half_t, 8>) + alignof(vector_type<half_t, 8>));
    ASSERT_EQ(alignof(vector_type<complex_half_t, 16>),
              alignof(vector_type<half_t, 16>) + alignof(vector_type<half_t, 16>));
    ASSERT_EQ(alignof(vector_type<complex_half_t, 32>),
              alignof(vector_type<half_t, 32>) + alignof(vector_type<half_t, 32>));
    ASSERT_EQ(alignof(vector_type<complex_half_t, 64>),
              alignof(vector_type<half_t, 64>) + alignof(vector_type<half_t, 64>));
}

TEST(Complex_half, TestAsType)
{
    struct complex_half_t
    {
        using type = half_t;
        type real;
        type img;
        complex_half_t() : real{type{}}, img{type{}} {}
        complex_half_t(type real_init, type img_init) : real{real_init}, img{img_init} {}
    };

    // test size
    const int size = 4;
    // custom type number of elements
    const int num_elem = sizeof(complex_half_t) / sizeof(complex_half_t::type);
    std::vector<half_t> test_vec = {half_t{0.3f},
                                    half_t{-0.6f},
                                    half_t{0.8f},
                                    half_t{-0.2f},
                                    half_t{0.5f},
                                    half_t{-0.7f},
                                    half_t{0.9f},
                                    half_t{-0.3f}};
    // reference vector
    vector_type<complex_half_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}([&](auto i) {
        ASSERT_EQ(right_vec.template AsType<complex_half_t>()(Number<i>{}).real, type_convert<half_t>(0.0f));
        ASSERT_EQ(right_vec.template AsType<complex_half_t>()(Number<i>{}).img, type_convert<half_t>(0.0f));
    });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}([&](auto i) {
        right_vec.template AsType<complex_half_t>()(Number<i>{}) =
            complex_half_t{test_vec.at(num_elem * i), test_vec.at(num_elem * i + 1)};
    });
    // copy the vector
    vector_type<complex_half_t, size> left_vec{right_vec};
    // check if values were copied correctly
    ck::static_for<0, size, 1>{}([&](auto i) {
        ASSERT_EQ(left_vec.template AsType<complex_half_t>()(Number<i>{}).real, test_vec.at(num_elem * i));
        ASSERT_EQ(left_vec.template AsType<complex_half_t>()(Number<i>{}).img, test_vec.at(num_elem * i + 1));
    });
}

TEST(Complex_half, TestAsTypeReshape)
{
    struct complex_half_t
    {
        using type = half_t;
        type real;
        type img;
        complex_half_t() : real{type{}}, img{type{}} {}
        complex_half_t(type real_init, type img_init) : real{real_init}, img{img_init} {}
    };

    // test size
    const int size = 4;
    // custom type number of elements
    const int num_elem = sizeof(complex_half_t) / sizeof(complex_half_t::type);
    std::vector<half_t> test_vec = {half_t{0.3f},
                                    half_t{-0.6f},
                                    half_t{0.8f},
                                    half_t{-0.2f},
                                    half_t{0.5f},
                                    half_t{-0.7f},
                                    half_t{0.9f},
                                    half_t{-0.3f}};
    // reference vector
    vector_type<complex_half_t, size> right_vec;
    // check default CTOR
    ck::static_for<0, size, 1>{}([&](auto i) {
        ASSERT_EQ(right_vec.template AsType<complex_half_t>()(Number<i>{}).real, type_convert<half_t>(0.0f));
        ASSERT_EQ(right_vec.template AsType<complex_half_t>()(Number<i>{}).img, type_convert<half_t>(0.0f));
    });
    // assign test values to the vector
    ck::static_for<0, size, 1>{}([&](auto i) {
        right_vec.template AsType<complex_half_t>()(Number<i>{}) =
            complex_half_t{test_vec.at(num_elem * i), test_vec.at(num_elem * i + 1)};
    });
    // copy the first half of a vector
    vector_type<complex_half_t, size / 2> left_vec{
        right_vec.template AsType<vector_type<complex_half_t, size / 2>::type>()(Number<0>{})};
    // check if values were copied correctly
    ck::static_for<0, size / 2, 1>{}([&](auto i) {
        ASSERT_EQ(left_vec.template AsType<complex_half_t>()(Number<i>{}).real, test_vec.at(num_elem * i));
        ASSERT_EQ(left_vec.template AsType<complex_half_t>()(Number<i>{}).img, test_vec.at(num_elem * i + 1));
    });
}
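Distilled from the tests above, a minimal sketch of the wrapper pattern they exercise: a custom type only needs a nested type alias, a data member, and the two constructors for ck::vector_type to store it and hand it back through the AsType<>() accessor. The my_scalar_t / sum_lanes names below are illustrative only and are not part of the commit.

#include "ck/utility/data_type.hpp"

// Hypothetical wrapper, following the same shape as custom_float_t in the tests above.
struct my_scalar_t
{
    using type = float;
    type data;
    my_scalar_t() : data{type{}} {}
    my_scalar_t(type init) : data{init} {}
};

inline float sum_lanes()
{
    ck::vector_type<my_scalar_t, 4> vec;
    // write each lane through the AsType<> accessor ...
    ck::static_for<0, 4, 1>{}(
        [&](auto i) { vec.AsType<my_scalar_t>()(ck::Number<i>{}) = my_scalar_t{0.25f}; });
    // ... then read the lanes back and accumulate them
    float sum = 0.f;
    ck::static_for<0, 4, 1>{}(
        [&](auto i) { sum += vec.AsType<my_scalar_t>()(ck::Number<i>{}).data; });
    return sum; // 1.0f
}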