gaoqiong / composable_kernel · Commits · 7a3b49e5

Commit 7a3b49e5, authored Jun 25, 2022 by Chao Liu

    Merge remote-tracking branch 'origin/develop' into contraction

Parents: e07b3d8e, d3051d75
Changes: 592

Showing 20 changed files with 799 additions and 677 deletions (+799 −677).
CMakeLists.txt                                                            +0 −5
Jenkinsfile                                                             +162 −133
LICENSE                                                                  +28 −0
README.md                                                                 +1 −1
example/01_gemm/gemm_dl_fp16.cpp                                         +14 −13
example/01_gemm/gemm_dl_fp32.cpp                                         +14 −13
example/01_gemm/gemm_dl_int8.cpp                                         +14 −13
example/01_gemm/gemm_xdl_bf16.cpp                                        +14 −13
example/01_gemm/gemm_xdl_fp16.cpp                                        +36 −30
example/01_gemm/gemm_xdl_fp64.cpp                                        +15 −14
example/01_gemm/gemm_xdl_int8.cpp                                        +15 −13
example/02_gemm_alpha_beta/gemm_xdl_alpha_beta.cpp                       +14 −15
example/03_gemm_bias_relu/gemm_xdl_bias_relu.cpp                        +166 −124
example/04_gemm_add_add_fastgelu/CMakeLists.txt                           +1 −0
example/04_gemm_add_add_fastgelu/README.md                               +23 −0
example/04_gemm_add_add_fastgelu/gemm_add_add_fastgelu_xdl_fp16.cpp     +248 −0
example/04_gemm_bias_relu_add/CMakeLists.txt                              +0 −1
example/04_gemm_bias_relu_add/gemm_xdl_bias_relu_add.cpp                  +0 −257
example/06_conv2d_fwd_bias_relu/conv2d_fwd_xdl_bias_relu.cpp             +15 −14
example/07_conv2d_fwd_bias_relu_add/conv2d_fwd_xdl_bias_relu_add.cpp     +19 −18
CMakeLists.txt

@@ -78,10 +78,6 @@ rocm_create_package(
     LDCONFIG
 )

-## half
-set(HALF_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/external/include/half")
-message("HALF_INCLUDE_DIR: ${HALF_INCLUDE_DIR}")
-
 ## tidy
 include(EnableCompilerWarnings)
 set(CK_TIDY_ERRORS ERRORS * -readability-inconsistent-declaration-parameter-name

@@ -229,7 +225,6 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin)
 include_directories(BEFORE
     ${PROJECT_SOURCE_DIR}/include
     ${PROJECT_BINARY_DIR}/include
     ${PROJECT_SOURCE_DIR}/library/include
 )
 (one include line is dropped in this hunk, matching the removal of the external half library above)
Jenkinsfile

@@ -7,7 +7,6 @@ def show_node_info() {
         echo "NODE_NAME = \$NODE_NAME"
         lsb_release -sd
         uname -r
-        cat /sys/module/amdgpu/version
         ls /opt/ -la
         """
 }

@@ -100,35 +99,45 @@ def buildHipClangJob(Map conf=[:]){
     def variant = env.STAGE_NAME
     def retimage
-    gitStatusWrapper(credentialsId: '7126e5fe-eb51-4576-b52b-9aaf1de8f0fd', gitHubContext: "Jenkins - ${variant}", account: 'ROCmSoftwarePlatform', repo: 'composable_kernel') {
-        try {
-            retimage = docker.build("${image}", dockerArgs + '.')
-            withDockerContainer(image: image, args: dockerOpts) {
-                timeout(time: 5, unit: 'MINUTES')
-                {
-                    sh 'PATH="/opt/rocm/opencl/bin:/opt/rocm/opencl/bin/x86_64:$PATH" clinfo'
-                }
-            }
-        }
-        catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e){
-            echo "The job was cancelled or aborted"
-            throw e
-        }
-        catch (Exception ex) {
-            retimage = docker.build("${image}", dockerArgs + "--no-cache .")
-            withDockerContainer(image: image, args: dockerOpts) {
-                timeout(time: 5, unit: 'MINUTES')
-                {
-                    sh 'PATH="/opt/rocm/opencl/bin:/opt/rocm/opencl/bin/x86_64:$PATH" clinfo'
-                }
-            }
-        }
+    gitStatusWrapper(credentialsId: "${status_wrapper_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCmSoftwarePlatform', repo: 'composable_kernel') {
+        if (params.USE_DOCKERFILE){
+            try {
+                retimage = docker.build("${image}", dockerArgs + '.')
+                withDockerContainer(image: image, args: dockerOpts) {
+                    timeout(time: 5, unit: 'MINUTES')
+                    {
+                        sh 'PATH="/opt/rocm/opencl/bin:/opt/rocm/opencl/bin/x86_64:$PATH" clinfo'
+                    }
+                }
+            }
+            catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e){
+                echo "The job was cancelled or aborted"
+                throw e
+            }
+            catch (Exception ex) {
+                retimage = docker.build("${image}", dockerArgs + "--no-cache .")
+                withDockerContainer(image: image, args: dockerOpts) {
+                    timeout(time: 5, unit: 'MINUTES')
+                    {
+                        sh 'PATH="/opt/rocm/opencl/bin:/opt/rocm/opencl/bin/x86_64:$PATH" clinfo'
+                    }
+                }
+            }
+        }
+        else {
+            timeout(time: 3, unit: 'HOURS'){
+                retimage = docker.image('compute-artifactory.amd.com:5000/rocm-plus-docker/framework/compute-rocm-dkms-no-npi-hipclang:9110_ubuntu18.04_py3.6_pytorch_rocm5.0_internal_testing_7ff5b54').pull()
+                image = "b56f8ac0d6ea"
+                sh "docker images"
+            }
+        }
         withDockerContainer(image: image, args: dockerOpts + ' -v=/var/jenkins/:/var/jenkins') {
             timeout(time: 5, unit: 'HOURS')
             {
-                sh 'PATH="/opt/rocm/opencl/bin:/opt/rocm/opencl/bin/x86_64:$PATH" clinfo'
                 cmake_build(conf)
             }
         }
     }

@@ -181,61 +190,92 @@ def runCKProfiler(Map conf=[:]){
 (same gitStatusWrapper credential change and USE_DOCKERFILE/prebuilt-image branching as in buildHipClangJob above, then:)

         withDockerContainer(image: image, args: dockerOpts + ' -v=/var/jenkins/:/var/jenkins') {
             timeout(time: 5, unit: 'HOURS')
             {
                 cmake_build(conf)
                 dir("script"){
-                    def perf_log = "perf_gemm_${gpu_arch}.log"
-                    sh "rm -f ${perf_log}"
-                    sh "echo Branch name: ${env.BRANCH_NAME} > ${perf_log}"
-                    sh "./profile_gemm.sh gemm 0 0 0 1 0 5 | tee -a ${perf_log}"
                     (…plus the 15 other "./profile_gemm.sh gemm <a> <b> 0 1 0 5" lines covering every combination of the first two arguments 0–3…)
-                    archiveArtifacts "${perf_log}"
-                    sh "python3 parse_perf_data.py ${perf_log} "
+                    //run gemm performance tests
+                    def gemm_log = "perf_gemm_${gpu_arch}.log"
+                    sh "rm -f ${gemm_log}"
+                    sh "echo Branch name: ${env.BRANCH_NAME} > ${gemm_log}"
+                    sh "echo Node name: ${NODE_NAME} >> ${gemm_log}"
+                    sh "echo GPU_arch name: ${gpu_arch} >> ${gemm_log}"
+                    sh "rocminfo | grep 'Compute Unit:' >> ${gemm_log} "
+                    sh "hipcc --version | grep -e 'HIP version' >> ${gemm_log}"
+                    sh "/opt/rocm/bin/amdclang++ --version | grep -e 'InstalledDir' >> ${gemm_log}"
+                    sh "./profile_gemm.sh gemm 0 0 0 1 0 5 | tee -a ${gemm_log}"
                     (…the same 15 other combinations, now appended to ${gemm_log}…)
                     //results will be parsed, stored, and analyzed within the python script
                     //the script will return 0 if the performance criteria are met
                     //or return 1 if the criteria are not met
+                    archiveArtifacts "${gemm_log}"
+                    sh "python3 parse_perf_data.py ${gemm_log} "
+                    //run resnet50 test
+                    def resnet_log = "perf_resnet50_${gpu_arch}.log"
+                    sh "rm -f ${resnet_log}"
+                    sh "echo Branch name: ${env.BRANCH_NAME} > ${resnet_log}"
+                    sh "echo Node name: ${NODE_NAME} >> ${resnet_log}"
+                    sh "echo GPU_arch name: ${gpu_arch} >> ${resnet_log}"
+                    sh "rocminfo | grep 'Compute Unit:' >> ${resnet_log} "
+                    sh "hipcc --version | grep -e 'HIP version' >> ${resnet_log}"
+                    sh "/opt/rocm/bin/amdclang++ --version | grep -e 'InstalledDir' >> ${resnet_log}"
+                    //first run tests with N=256
+                    sh "./profile_conv.sh conv_fwd_bias_relu 1 1 1 1 0 2 0 1 256 | tee -a ${resnet_log}"
+                    //then run with N=4
+                    sh "./profile_conv.sh conv_fwd_bias_relu 1 1 1 1 0 2 0 1 4 | tee -a ${resnet_log}"
+                    archiveArtifacts "${resnet_log}"
+                    //the script will put the results from N=256 and N=4 runs into separate tables
+                    sh "python3 parse_perf_data.py ${resnet_log} "
                 }
             }
         }
@@ -265,9 +305,21 @@ pipeline {
     options {
         parallelsAlwaysFailFast()
     }
-    // environment{
-    //     variable = value
-    // }
+    parameters {
+        booleanParam(name: "USE_DOCKERFILE", defaultValue: true, description: "")
+    }
+    environment{
+        dbuser = "${dbuser}"
+        dbpassword = "${dbpassword}"
+        dbsship = "${dbsship}"
+        dbsshport = "${dbsshport}"
+        dbsshuser = "${dbsshuser}"
+        dbsshpassword = "${dbsshpassword}"
+        status_wrapper_creds = "${status_wrapper_creds}"
+    }
     stages{
         stage("Static checks") {
             parallel{

@@ -282,30 +334,6 @@ pipeline {
             //         buildHipClangJobAndReboot(build_cmd: build_cmd, no_reboot:true, prefixpath: '/opt/rocm', build_type: 'debug')
             //     }
             // }
-            // we will build and run ckProfiler release version later, during the performance test stage
-            //stage('Build Profiler: Release, gfx908')
-            //{
-            //    agent { label rocmnode("nogpu")}
-            //    environment{
-            //        setup_args = """ -D CMAKE_CXX_FLAGS="--offload-arch=gfx908 -O3 " -DBUILD_DEV=On """
-            //    }
-            //    steps{
-            //        buildHipClangJobAndReboot(setup_args:setup_args, config_targets: "ckProfiler", no_reboot:true, build_type: 'Release')
-            //    }
-            //}
-            //stage('Build Profiler: Debug, gfx908')
-            //{
-            //    agent { label rocmnode("nogpu")}
-            //    environment{
-            //        setup_args = """ -D CMAKE_CXX_FLAGS="--offload-arch=gfx908 -O3 " -DBUILD_DEV=On """
-            //    }
-            //    steps{
-            //        // until we stabilize debug build due to compiler crashes
-            //        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
-            //            buildHipClangJobAndReboot(setup_args:setup_args, config_targets: "ckProfiler", no_reboot:true, build_type: 'Debug')
-            //        }
-            //    }
-            //}
             stage('Clang Format') {
                 agent { label rocmnode("nogpu") }
                 environment{

@@ -333,12 +361,11 @@ pipeline {
         stage("Run Tests: gfx908")
         {
             agent{ label rocmnode("gfx908")}
             environment{
-                setup_args = """ -D CMAKE_CXX_FLAGS=" --offload-arch=gfx900 --offload-arch=gfx906 --offload-arch=gfx908 --offload-arch=gfx90a -O3 " -DBUILD_DEV=On """
+                setup_args = """ -D CMAKE_CXX_FLAGS=" --offload-arch=gfx908 -O3 " -DBUILD_DEV=On """
             }
             steps{
-                buildHipClangJobAndReboot(setup_args: setup_args, config_targets: "check", no_reboot:true, build_type: 'Release')
+                buildHipClangJobAndReboot(setup_args: setup_args, config_targets: "check", no_reboot:true, build_type: 'Release', gpu_arch: "gfx908")
             }
         }
         stage("Run Tests: gfx90a")
         {

@@ -347,66 +374,68 @@ pipeline {
                 setup_args = """ -D CMAKE_CXX_FLAGS="--offload-arch=gfx90a -O3 " -DBUILD_DEV=On """
             }
             steps{
-                buildHipClangJobAndReboot(setup_args: setup_args, config_targets: "check", no_reboot:true, build_type: 'Release')
+                buildHipClangJobAndReboot(setup_args: setup_args, config_targets: "check", no_reboot:true, build_type: 'Release', gpu_arch: "gfx90a")
             }
         }

-        stage("Client App")
-        {
-            parallel
-            {
-                stage("Run Client App")
-                {
-                    agent{ label rocmnode("gfx908")}
-                    environment{
-                        setup_args = """ -D -DBUILD_DEV=Off -DCMAKE_INSTALL_PREFIX=../install CMAKE_CXX_FLAGS="--offload-arch=gfx908 -O3 " """
-                        execute_args = """ cd ../test/client_app && rm -rf build && mkdir build && cd build && cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" .. && make """
-                    }
-                    steps{
-                        buildHipClangJobAndReboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
-                    }
-                }
-            }
-        }
+        //stage("Client App")
+        //(the whole stage, with the same body, is kept but commented out)

         stage("Performance Tests")
         {
             parallel
             {
                 stage("Run ckProfiler: gfx908")
                 {
                     agent{ label rocmnode("gfx908")}
                     environment{
                         setup_args = """ -D CMAKE_CXX_FLAGS="--offload-arch=gfx908 -O3 " -DBUILD_DEV=On """
-                        dbuser = "${dbuser}"
-                        dbpassword = "${dbpassword}"
-                        dbsship = "${dbsship}"
-                        dbsshport = "${dbsshport}"
-                        dbsshuser = "${dbsshuser}"
-                        dbsshpassword = "${dbsshpassword}"
                     }
                     steps{
-                        runPerfTest(setup_args: setup_args, config_targets: "ckProfiler", no_reboot:true, build_type: 'Release')
+                        runPerfTest(setup_args: setup_args, config_targets: "ckProfiler", no_reboot:true, build_type: 'Release', gpu_arch: "gfx908")
                     }
                 }
+                stage("Run ckProfiler: gfx90a")
+                {
+                    agent{ label rocmnode("gfx90a")}
+                    environment{
+                        setup_args = """ -D CMAKE_CXX_FLAGS="--offload-arch=gfx90a -O3 " -DBUILD_DEV=On """
+                    }
+                    steps{
+                        runPerfTest(setup_args: setup_args, config_targets: "ckProfiler", no_reboot:true, build_type: 'Release', gpu_arch: "gfx90a")
+                    }
+                }
             }
         }

 (the commented-out "Packages" stage at the end — a "Package /opt/rocm" stage gated on params.BUILD_PACKAGES && params.TARGET_NOGPU && params.DATATYPE_NA, calling buildHipClangJobAndReboot(package_build: "true", prefixpath: '/opt/rocm', gpu_arch: "gfx906;gfx908;gfx90a"), to be enabled after the cmake file supports packaging — was converted from a /* ... */ block comment to // line comments with its content unchanged)
     }
 }
LICENSE  (new file, +28 −0)
Copyright (c) 2018- , Advanced Micro Devices, Inc. (Chao Liu, Jing Zhang)
Copyright (c) 2019- , Advanced Micro Devices, Inc. (Letao Qin, Qianfeng Zhang, Liang Huang, Shaojie Wang)
Copyright (c) 2022- , Advanced Micro Devices, Inc. (Anthony Chang, Chunyu Lai, Illia Silin, Adam Osewski, Poyen Chen, Jehandad Khan)
Copyright (c) 2019-2021, Advanced Micro Devices, Inc. (Hanwen Chang)
Copyright (c) 2019-2020, Advanced Micro Devices, Inc. (Tejash Shah)
Copyright (c) 2020 , Advanced Micro Devices, Inc. (Xiaoyan Zhou)
Copyright (c) 2021-2022, Advanced Micro Devices, Inc. (Jianfeng Yan)
SPDX-License-Identifier: MIT
Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md

@@ -6,7 +6,7 @@ docker run \
 --group-add sudo \
 -w /root/workspace \
 -v ${PATH_TO_LOCAL_WORKSPACE}:/root/workspace \
-rocm/tensorflow:rocm4.3.1-tf2.6-dev \
+rocm/tensorflow:rocm5.1-tf2.6-dev \
 /bin/bash
 ```
example/01_gemm/gemm_dl_fp16.cpp

+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
+
 #include <iostream>
 #include <numeric>
 #include <initializer_list>
 #include <cstdlib>
-#include <stdlib.h>
-#include <half.hpp>
-#include "check_err.hpp"
-#include "config.hpp"
-#include "device.hpp"
-#include "host_tensor.hpp"
-#include "host_tensor_generator.hpp"
-#include "device_tensor.hpp"
-#include "device_gemm_dl.hpp"
-#include "element_wise_operation.hpp"
-#include "reference_gemm.hpp"
-#include "gemm_specialization.hpp"
+
+#include "ck/ck.hpp"
+#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
+#include "ck/tensor_operation/gpu/device/device_gemm_dl.hpp"
+#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+
+#include "ck/library/utility/check_err.hpp"
+#include "ck/library/host_tensor/device_memory.hpp"
+#include "ck/library/host_tensor/host_tensor.hpp"
+#include "ck/library/host_tensor/host_tensor_generator.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"

 template <ck::index_t... Is>
 using S = ck::Sequence<Is...>;
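The `S` alias above appears unchanged in every example touched by this commit: it shortens `ck::Sequence<...>` so the long tuning-parameter lists in the device-op instantiations stay readable. A minimal, self-contained sketch of the same pattern, using a stand-in `Sequence` rather than CK's real one:

```cpp
// Minimal sketch (not the CK implementation): a variadic alias that
// compresses integer-sequence template parameters.
#include <cstddef>
#include <iostream>

template <int... Is>
struct Sequence
{
    static constexpr std::size_t Size() { return sizeof...(Is); }
};

template <int... Is>
using S = Sequence<Is...>; // same shorthand the examples use for ck::Sequence

int main()
{
    using ThreadClusterLengths = S<4, 64, 1>; // e.g. a K0 x M x K1 thread arrangement
    std::cout << ThreadClusterLengths::Size() << '\n'; // prints 3
}
```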
example/01_gemm/gemm_dl_fp32.cpp

(same change as gemm_dl_fp16.cpp above: the SPDX/copyright header is added, and the flat includes — <stdlib.h>, <half.hpp>, "check_err.hpp", "config.hpp", "device.hpp", "host_tensor.hpp", "host_tensor_generator.hpp", "device_tensor.hpp", "device_gemm_dl.hpp", "element_wise_operation.hpp", "reference_gemm.hpp", "gemm_specialization.hpp" — are replaced by the namespaced ck/ include paths; the S alias is unchanged)
example/01_gemm/gemm_dl_int8.cpp

(same SPDX-header and include restructuring as gemm_dl_fp16.cpp above)
example/01_gemm/gemm_xdl_bf16.cpp

(same SPDX-header and include restructuring, for the XDL CShuffle variant: the flat includes are replaced by "ck/ck.hpp", "ck/tensor_operation/gpu/device/device_gemm_xdl_cshuffle.hpp", "ck/tensor_operation/gpu/element/element_wise_operation.hpp", "ck/tensor_operation/gpu/device/gemm_specialization.hpp", "ck/library/host_tensor/device_memory.hpp", "ck/library/host_tensor/host_tensor.hpp", "ck/library/host_tensor/host_tensor_generator.hpp", "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp", and "ck/library/utility/check_err.hpp"; the S alias is unchanged)
example/01_gemm/gemm_xdl_fp16.cpp

+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
+
 #include <iostream>
 #include <numeric>
 #include <initializer_list>
 #include <cstdlib>
-#include <stdlib.h>
-#include <half.hpp>
-#include "check_err.hpp"
-#include "config.hpp"
-#include "device.hpp"
-#include "host_tensor.hpp"
-#include "host_tensor_generator.hpp"
-#include "device_tensor.hpp"
-#include "device_gemm_xdl.hpp"
-#include "device_gemm_xdl_cshuffle.hpp"
-#include "element_wise_operation.hpp"
-#include "reference_gemm.hpp"
-#include "gemm_specialization.hpp"
+
+#include "ck/ck.hpp"
+#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
+#include "ck/tensor_operation/gpu/device/device_gemm_xdl_cshuffle.hpp"
+#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+
+#include "ck/library/utility/check_err.hpp"
+#include "ck/library/host_tensor/device_memory.hpp"
+#include "ck/library/host_tensor/host_tensor.hpp"
+#include "ck/library/host_tensor/host_tensor_generator.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"

 template <ck::index_t... Is>
 using S = ck::Sequence<Is...>;
@@ -27,28 +28,29 @@ using Col = ck::tensor_layout::gemm::ColumnMajor;
 using PassThrough = ck::tensor_operation::element_wise::PassThrough;

-using ADataType   = ck::half_t;
-using BDataType   = ck::half_t;
-using CDataType   = ck::half_t;
-using AccDataType = float;
+using ADataType        = F16;
+using BDataType        = F16;
+using AccDataType      = F32;
+using CShuffleDataType = F32;
+using CDataType        = F16;

-using ALayout = ck::tensor_layout::gemm::RowMajor;
-using BLayout = ck::tensor_layout::gemm::ColumnMajor;
-using CLayout = ck::tensor_layout::gemm::RowMajor;
+using ALayout = Row;
+using BLayout = Col;
+using CLayout = Row;

-using AElementOp = ck::tensor_operation::element_wise::PassThrough;
-using BElementOp = ck::tensor_operation::element_wise::PassThrough;
-using CElementOp = ck::tensor_operation::element_wise::PassThrough;
+using AElementOp = PassThrough;
+using BElementOp = PassThrough;
+using CElementOp = PassThrough;

 static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
 // clang-format off
 using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle
-    < Row, Col, Row, F16, F16, F16, F32, F32, AElementOp, BElementOp, CElementOp, GemmDefault, 1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>;
+    < ALayout, BLayout, CLayout, ADataType, BDataType, CDataType, AccDataType, CShuffleDataType, AElementOp, BElementOp, CElementOp, GemmDefault, 1, 256, 256, 128, 32, 8, 8, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 8>;
 // clang-format on
 (the multi-line "//######| ALayout| BLayout| ..." parameter-name comment table above the instantiation is realigned to match the new named-alias arguments; its column names — BlockSize, MPer/NPer/KPer Block, AK1, BK1, MPer/NPer XDL, MXdl/NXdl PerWave, the A/B block-transfer cluster lengths, access orders, vector dims, scalars-per-vector, LDS padding flags, CShuffle per-shuffle counts, and the C block-transfer cluster lengths — are unchanged)

 using ReferenceGemmInstance = ck::tensor_operation::host::
@@ -69,7 +71,11 @@ int main(int argc, char* argv[])
     ck::index_t StrideB = 4096;
     ck::index_t StrideC = 4096;

-    if(argc == 4)
+    if(argc == 1)
+    {
+        // use default case
+    }
+    else if(argc == 4)
     {
         do_verification = std::stoi(argv[1]);
         init_method     = std::stoi(argv[2]);

@@ -93,7 +99,7 @@ int main(int argc, char* argv[])
     {
         printf("arg1: verification (0=no, 1=yes)\n");
         printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
-        printf("arg3: time kernel (0=n0, 1=yes)\n");
+        printf("arg3: time kernel (0=no, 1=yes)\n");
         printf("arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC\n");
         exit(0);
     }
example/01_gemm/gemm_xdl_fp64.cpp

(same SPDX-header and include restructuring: the flat includes — including both "device_gemm_xdl.hpp" and "device_gemm_xdl_cshuffle.hpp" — are replaced by "ck/ck.hpp", "ck/tensor_operation/gpu/device/tensor_layout.hpp", "ck/tensor_operation/gpu/device/gemm_specialization.hpp", "ck/tensor_operation/gpu/device/device_gemm_xdl.hpp", "ck/tensor_operation/gpu/element/element_wise_operation.hpp", the ck/library host-tensor headers, "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp", and "ck/library/utility/check_err.hpp"; the S alias is unchanged)
example/01_gemm/gemm_xdl_int8.cpp

(same SPDX-header and include restructuring, keeping the CShuffle device op: "ck/ck.hpp", "ck/tensor_operation/gpu/device/tensor_layout.hpp", "ck/tensor_operation/gpu/device/gemm_specialization.hpp", "ck/tensor_operation/gpu/device/device_gemm_xdl_cshuffle.hpp", "ck/tensor_operation/gpu/element/element_wise_operation.hpp", the ck/library host-tensor headers, "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp", and "ck/library/utility/check_err.hpp"; the S alias is unchanged)
example/02_gemm_alpha_beta/gemm_xdl_alpha_beta.cpp

(same SPDX-header and include restructuring: the flat includes — <stdlib.h>, <half.hpp>, "check_err.hpp", "config.hpp", "print.hpp", "device.hpp", "host_tensor.hpp", "host_tensor_generator.hpp", "host_gemm.hpp", "device_tensor.hpp", "device_base.hpp", "device_gemm_xdl_c_shuffle_bias_2d.hpp", "element_wise_operation.hpp", "reference_gemm_bias_2d.hpp" — become "ck/ck.hpp", "ck/tensor_operation/gpu/device/gemm_specialization.hpp", "ck/tensor_operation/gpu/device/device_gemm_xdl_c_shuffle_bias_2d.hpp", "ck/tensor_operation/gpu/element/element_wise_operation.hpp", "ck/library/utility/check_err.hpp", the ck/library host-tensor headers, and "ck/library/reference_tensor_operation/cpu/reference_gemm_bias_2d.hpp"; the S alias is unchanged)
example/03_gemm_bias_relu/gemm_xdl_bias_relu.cpp

+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
+
 #include <iostream>
 #include <numeric>
 #include <initializer_list>
 #include <cstdlib>
-#include <stdlib.h>
-#include <half.hpp>
-#include "check_err.hpp"
-#include "config.hpp"
-#include "print.hpp"
-#include "device.hpp"
-#include "host_tensor.hpp"
-#include "host_tensor_generator.hpp"
-#include "host_gemm.hpp"
-#include "device_tensor.hpp"
-#include "element_wise_operation.hpp"
-#include "device_gemm_xdl_c_shuffle_bias_activation.hpp"
-#include "reference_gemm_bias_activation.hpp"
+
+#include "ck/ck.hpp"
+#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
+#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
+#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d_xdl_cshuffle.hpp"
+#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+
+#include "ck/library/host_tensor/device_memory.hpp"
+#include "ck/library/host_tensor/host_tensor.hpp"
+#include "ck/library/host_tensor/host_tensor_generator.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
+#include "ck/library/utility/check_err.hpp"

 template <ck::index_t... Is>
 using S = ck::Sequence<Is...>;
The example is rewritten from the dedicated bias-activation op to the generic multiple-D CShuffle op with a user-defined fused epilogue:

-using ADataType   = ck::half_t;
-using BDataType   = ck::half_t;
-using CDataType   = ck::half_t;
-using AccDataType = float;
-
-using ALayout = ck::tensor_layout::gemm::RowMajor;
-using BLayout = ck::tensor_layout::gemm::ColumnMajor;
-using CLayout = ck::tensor_layout::gemm::RowMajor;
-
-using AElementOp = ck::tensor_operation::element_wise::PassThrough;
-using BElementOp = ck::tensor_operation::element_wise::PassThrough;
-using CElementOp = ck::tensor_operation::element_wise::AddRelu;
-
-// clang-format off
-using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmXdl_C_Shuffle_Bias_Activation<
-    ADataType, BDataType, CDataType, AccDataType,
-    ALayout, BLayout, CLayout,
-    AElementOp, BElementOp, CElementOp,
-    256,            // BlockSize
-    256, 128, 4, 8, // MPerBlock, NPerBlock, K0PerBlock, K1
-    32, 32, 4, 2,   // MPerXDL, NPerXDL, MXdlPerWave, NXdlPerWave
-    S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, // A block transfer
-    S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, true, // B block transfer
-    1, 1, S<1, 1, 32, 1, 1, 8>, 8>;                     // CShuffle
-// clang-format on
-
-using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemmBiasActivation<
-    ADataType, BDataType, CDataType, AElementOp, BElementOp, CElementOp>;
+using F16 = ck::half_t;
+using F32 = float;
+
+using Row = ck::tensor_layout::gemm::RowMajor;
+using Col = ck::tensor_layout::gemm::ColumnMajor;
+
+using PassThrough = ck::tensor_operation::element_wise::PassThrough;
+
+// C = A * B
+// E = Relu(C + D);
+struct AddRelu
+{
+    __host__ __device__ void
+    operator()(ck::half_t& e, const ck::half_t& c, const ck::half_t& d) const
+    {
+        const ck::half_t x = c + d;
+        e = x > 0 ? x : 0;
+    }
+};
+
+using ADataType        = F16;
+using BDataType        = F16;
+using AccDataType      = F32;
+using CShuffleDataType = F16;
+using DDataType        = F16;
+using DsDataType       = ck::Tuple<DDataType>;
+using EDataType        = F16;
+
+using ALayout = Row;
+using BLayout = Col;
+using ELayout = Row;
+
+using AElementOp   = PassThrough;
+using BElementOp   = PassThrough;
+using CDEElementOp = AddRelu;
+
+static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
+
+// clang-format off
+using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleD_Xdl_CShuffle
+    < ALayout, BLayout, ELayout, ADataType, BDataType, AccDataType, CShuffleDataType,
+      DsDataType, EDataType, AElementOp, BElementOp, CDEElementOp, GemmDefault, 1,
+      256, 256, 128, 32, 8, 8, 32, 32, 4, 2,
+      S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
+      S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
+      1, 1, S<1, 32, 1, 8>, 8>;
+// clang-format on
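The `AddRelu` functor is the whole customization point here: the device op fuses `E = Relu(C + D)` into the GEMM epilogue, and the verification code later applies the same functor per element on the host (see the loop in the verification hunk below). A standalone sketch of that elementwise op, using `float` in place of `ck::half_t` so it compiles without CK headers:

```cpp
// Host-only sketch of the AddRelu elementwise op from this example:
// E = Relu(C + D), applied per output element.
#include <iostream>

struct AddRelu
{
    void operator()(float& e, const float& c, const float& d) const
    {
        const float x = c + d;
        e = x > 0 ? x : 0; // clamp negative sums to zero
    }
};

int main()
{
    AddRelu op;
    float e = 0.f;
    op(e, -1.5f, 0.5f); // c + d = -1.0 -> clamped
    std::cout << e << '\n'; // 0
    op(e, 1.5f, 0.5f);  // c + d = 2.0 -> passes through
    std::cout << e << '\n'; // 2
}
```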
int main(int argc, char* argv[])
{

@@ -94,9 +117,13 @@ int main(int argc, char* argv[])
     ck::index_t StrideA = 4096;
     ck::index_t StrideB = 4096;
-    ck::index_t StrideC = 4096;
+    ck::index_t StrideE = 4096;

-    if(argc == 4)
+    if(argc == 1)
+    {
+        // use default case
+    }
+    else if(argc == 4)
     {
         do_verification = std::stoi(argv[1]);
         init_method     = std::stoi(argv[2]);

@@ -114,14 +141,14 @@ int main(int argc, char* argv[])
         StrideA = std::stoi(argv[7]);
         StrideB = std::stoi(argv[8]);
-        StrideC = std::stoi(argv[9]);
+        StrideE = std::stoi(argv[9]);
     }
     else
     {
         printf("arg1: verification (0=no, 1=yes)\n");
         printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
-        printf("arg3: time kernel (0=n0, 1=yes)\n");
+        printf("arg3: time kernel (0=no, 1=yes)\n");
-        printf("arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC\n");
+        printf("arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideE\n");
         exit(0);
     }
@@ -141,17 +168,14 @@ int main(int argc, char* argv[])
     Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
     Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
-    Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
-    Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
-    // c0_n[n]
-    Tensor<CDataType> c0_n(HostTensorDescriptor(
-        std::vector<std::size_t>({static_cast<std::size_t>(N)}), std::vector<std::size_t>({1})));
+    Tensor<DDataType> d_m_n(f_host_tensor_descriptor(M, N, 0, ELayout{}));
+    Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
+    Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));

     std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
     std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
-    std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
-    std::cout << "c0_n: " << c0_n.mDesc << std::endl;
+    std::cout << "d_m_n: " << d_m_n.mDesc << std::endl;
+    std::cout << "e_m_n: " << e_m_n_host_result.mDesc << std::endl;

     switch(init_method)
     {

@@ -159,59 +183,59 @@ int main(int argc, char* argv[])
     case 1:
         a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
         b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
-        c0_n.GenerateTensorValue(GeneratorTensor_2<CDataType>{-5, 5});
+        d_m_n.GenerateTensorValue(GeneratorTensor_2<DDataType>{-5, 5});
         break;
     default:
         a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
-        c0_n.GenerateTensorValue(GeneratorTensor_3<CDataType>{0.0, 1.0});
+        d_m_n.GenerateTensorValue(GeneratorTensor_3<DDataType>{0.0, 1.0});
     }
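Note that `d_m_n` above is built with stride 0 in the M dimension, so all M rows alias the same N-element bias vector; the dedicated 1-D `c0_n` tensor of the old code becomes an ordinary 2-D tensor with a broadcasting stride. A small sketch of how a stride-0 leading dimension implements that broadcast:

```cpp
// Sketch of the stride-0 broadcast used for d_m_n: with row stride 0,
// every row index m maps onto the same N-element bias vector.
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    const std::size_t M = 3, N = 4;
    std::vector<float> bias{10.f, 20.f, 30.f, 40.f}; // one value per column n
    const std::size_t stride_m = 0, stride_n = 1;    // strides {0, 1}

    for(std::size_t m = 0; m < M; ++m)
    {
        for(std::size_t n = 0; n < N; ++n)
            std::cout << bias[m * stride_m + n * stride_n] << ' ';
        std::cout << '\n'; // each row prints the same bias vector
    }
}
```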
     DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpace());
     DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpace());
-    DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpace());
-    DeviceMem c0_n_device_buf(sizeof(CDataType) * c0_n.mDesc.GetElementSpace());
+    DeviceMem d_m_n_device_buf(sizeof(DDataType) * d_m_n.mDesc.GetElementSpace());
+    DeviceMem e_m_n_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpace());

     a_m_k_device_buf.ToDevice(a_m_k.mData.data());
     b_k_n_device_buf.ToDevice(b_k_n.mData.data());
-    c_m_n_device_buf.ToDevice(c_m_n_device_result.mData.data());
-    c0_n_device_buf.ToDevice(c0_n.mData.data());
+    d_m_n_device_buf.ToDevice(d_m_n.mData.data());

     auto a_element_op = AElementOp{};
     auto b_element_op = BElementOp{};
-    auto c_element_op = CElementOp{};
+    auto cde_element_op = CDEElementOp{};

     // do GEMM
-    auto gemm     = DeviceGemmInstance{};
-    auto invoker  = gemm.MakeInvoker();
-    auto argument = gemm.MakeArgument(static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
-                                      static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
-                                      static_cast<CDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
-                                      static_cast<CDataType*>(c0_n_device_buf.GetDeviceBuffer()),
-                                      M, N, K, StrideA, StrideB, StrideC,
-                                      a_element_op, b_element_op, c_element_op);
+    auto device_op = DeviceOpInstance{};
+    auto invoker   = device_op.MakeInvoker();
+    auto argument  =
+        device_op.MakeArgument(a_m_k_device_buf.GetDeviceBuffer(),
+                               b_k_n_device_buf.GetDeviceBuffer(),
+                               std::array<const void*, 1>{d_m_n_device_buf.GetDeviceBuffer()},
+                               e_m_n_device_buf.GetDeviceBuffer(),
+                               M, N, K, StrideA, StrideB,
+                               std::array<ck::index_t, 1>{0},
+                               StrideE,
+                               a_element_op, b_element_op, cde_element_op);

-    if(!gemm.IsSupportedArgument(argument))
+    if(!device_op.IsSupportedArgument(argument))
     {
-        throw std::runtime_error("wrong! this device_op instance does not support this problem");
+        throw std::runtime_error(
+            "wrong! device_gemm with the specified compilation parameters does "
+            "not support this GEMM problem");
     }

     float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});
     std::size_t flop = std::size_t(2) * M * N * K;
-    std::size_t num_btype = sizeof(ADataType) * M * K + sizeof(BDataType) * K * M +
-                            sizeof(CDataType) * M * N + sizeof(CDataType) * N;
+    std::size_t num_btype = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
+                            sizeof(EDataType) * M * N + sizeof(EDataType) * N;

     float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
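This hunk also fixes a byte-count bug: the B matrix was charged `K * M` bytes instead of `K * N`. The accounting follows the usual GEMM rule: 2·M·N·K flops (one multiply plus one add per MAC), one read each of A, B and the bias, and one write of E. A sketch of the same arithmetic, showing why the units the examples print come out as TFLOPS and GB/s; the time value is just an illustrative input, not a claim about any particular GPU:

```cpp
// GEMM performance bookkeeping, as in the examples:
// flop / 1e9 / time_ms  == TFLOPS   (ops per ms, scaled to 1e12 ops/s)
// bytes / 1e6 / time_ms == GB/s     (bytes per ms, scaled to 1e9 B/s)
#include <cstddef>
#include <iostream>

int main()
{
    const std::size_t M = 3840, N = 4096, K = 4096; // the examples' default shape
    const float ave_time_ms = 1.27f;                // hypothetical measured kernel time

    const std::size_t flop = std::size_t(2) * M * N * K;
    // fp16 (2 bytes) for A (M*K), B (K*N), E (M*N), plus a per-N bias
    const std::size_t num_btype = 2 * (M * K + K * N + M * N + N);

    const float tflops   = static_cast<float>(flop) / 1.0e9f / ave_time_ms;
    const float gb_per_s = static_cast<float>(num_btype) / 1.0e6f / ave_time_ms;

    std::cout << tflops << " TFlops, " << gb_per_s << " GB/s\n"; // ~101 TFlops here
}
```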
@@ -220,19 +244,37 @@ int main(int argc, char* argv[])
     std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s"
               << std::endl;

-    c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());
-
     if(do_verification)
     {
+        e_m_n_device_buf.FromDevice(e_m_n_device_result.mData.data());
+
+        Tensor<AccDataType> c_m_n(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
+
+        using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
+                                                                                BDataType,
+                                                                                AccDataType,
+                                                                                AccDataType,
+                                                                                AElementOp,
+                                                                                BElementOp,
+                                                                                PassThrough>;
         auto ref_gemm    = ReferenceGemmInstance{};
         auto ref_invoker = ref_gemm.MakeInvoker();

-        auto ref_argument = ref_gemm.MakeArgument(
-            a_m_k, b_k_n, c_m_n_host_result, c0_n, a_element_op, b_element_op, c_element_op);
+        auto ref_argument =
+            ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, a_element_op, b_element_op, PassThrough{});

         ref_invoker.Run(ref_argument);

-        return ck::utils::check_err(c_m_n_device_result.mData, c_m_n_host_result.mData) ? 0 : 1;
+        for(int m = 0; m < M; ++m)
+        {
+            for(int n = 0; n < N; ++n)
+            {
+                cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n), d_m_n(m, n));
+            }
+        }
+
+        return ck::utils::check_err(e_m_n_device_result.mData, e_m_n_host_result.mData) ? 0 : 1;
     }

     return 0;
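`ck::utils::check_err` compares the device output against the host reference and drives the process exit code. Its exact tolerances live in `ck/library/utility/check_err.hpp`; the following is a hedged stand-in showing the comparison pattern (absolute-plus-relative tolerance), not CK's literal implementation:

```cpp
// Stand-in for a check_err-style buffer comparison: pass if every element
// is within atol + rtol * |ref| of the reference value.
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

bool check_err(const std::vector<float>& out,
               const std::vector<float>& ref,
               float rtol = 1e-3f,
               float atol = 1e-3f)
{
    if(out.size() != ref.size())
        return false;
    for(std::size_t i = 0; i < out.size(); ++i)
    {
        const float err = std::abs(out[i] - ref[i]);
        if(err > atol + rtol * std::abs(ref[i]))
        {
            std::cout << "mismatch at " << i << ": " << out[i] << " vs " << ref[i] << '\n';
            return false;
        }
    }
    return true;
}

int main()
{
    std::vector<float> device_result{1.0f, 2.0f, 3.0f};
    std::vector<float> host_result{1.0f, 2.0005f, 3.0f}; // within tolerance
    return check_err(device_result, host_result) ? 0 : 1; // 0 on success, as above
}
```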
example/04_gemm_add_add_fastgelu/CMakeLists.txt  (new file)

+add_example_executable(example_gemm_add_add_fastgelu_xdl_fp16 gemm_add_add_fastgelu_xdl_fp16.cpp)
example/04_gemm_bias_relu_add/README.md → example/04_gemm_add_add_fastgelu/README.md

-# Instructions for ```example_gemm_xdl_bias_relu_add```
+# Instructions for ```example_gemm_add_add_fastgelu_xdl_fp16```

-## Run ```example_gemm_xdl_bias_relu_add```
+## Run ```example_gemm_add_add_fastgelu_xdl_fp16```
 ```bash
 #arg1: verification (0=no, 1=yes)
 #arg2: initialization (0=no init, 1=integer value, 2=decimal value)
-#arg3: run kernel # of times (>1)
-#arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC
-./bin/example_gemm_xdl_bias_relu_add 0 1 5 3840 4096 4096 4096 4096 4096
+#arg3: time kernel (0=no, 1=yes)
+#arg4 to 11: M (256x), N(128x), K(32x), StrideA, StrideB, StrideD0, StrideD1, StrideE"
+./bin/example_gemm_add_add_fastgelu_xdl_fp16 1 1 1
 ```

 Result (MI100 @ 1087Mhz, 133.5TFlops peak FP16)
 ```
 a_m_k: dim 2, lengths {3840, 4096}, strides {4096, 1}
 b_k_n: dim 2, lengths {4096, 4096}, strides {1, 4096}
-c_m_n: dim 2, lengths {3840, 4096}, strides {4096, 1}
-c0_m_n: dim 2, lengths {3840, 4096}, strides {4096, 1}
-c1_m_n: dim 2, lengths {3840, 4096}, strides {1, 0}
-arg.a_grid_desc_k0_m_k1_{512, 3840, 8}
-arg.b_grid_desc_k0_n_k1_{512, 4096, 8}
-arg.c_grid_desc_m_n_{ 3840, 4096}
-arg.c0_grid_desc_m_n_{ 3840, 4096}
-arg.c1_grid_desc_m_n_{ 3840, 4096}
+d0_m_n: dim 2, lengths {3840, 4096}, strides {0, 1}
+d1_m_n: dim 2, lengths {3840, 4096}, strides {4096, 1}
+e_m_n: dim 2, lengths {3840, 4096}, strides {4096, 1}
 launch_and_time_kernel: grid_dim {480, 1, 1}, block_dim {256, 1, 1}
-Warm up
-Start running 5 times...
-Perf: 1.27583 ms, 100.992 TFlops, 73.9688 GB/s
+Warm up 1 time
+Start running 10 times...
+Perf: 1.26914 ms, 101.525 TFlops, 100.804 GB/s, DeviceGemmMultipleD_Xdl_CShuffle<256, 256, 128, 32, 8, 8>
 ```
example/04_gemm_add_add_fastgelu/gemm_add_add_fastgelu_xdl_fp16.cpp  (new file, +248 −0)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/host_tensor/device_memory.hpp"
#include "ck/library/host_tensor/host_tensor.hpp"
#include "ck/library/host_tensor/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F16 = ck::half_t;
using F32 = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using PassThrough    = ck::tensor_operation::element_wise::PassThrough;
using AddAddFastGelu = ck::tensor_operation::element_wise::AddAddFastGelu;

using ADataType        = F16;
using BDataType        = F16;
using AccDataType      = F32;
using CShuffleDataType = F32;
using D0DataType       = F16;
using D1DataType       = F16;
using DsDataType       = ck::Tuple<D0DataType, D1DataType>;
using EDataType        = F16;

using ALayout  = Row;
using BLayout  = Col;
using D0Layout = Row;
using D1Layout = Row;
using ELayout  = Row;

using AElementOp   = PassThrough;
using BElementOp   = PassThrough;
using CDEElementOp = AddAddFastGelu;

static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;

// clang-format off
using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleD_Xdl_CShuffle
    < ALayout, BLayout, ELayout, ADataType, BDataType, AccDataType, CShuffleDataType,
      DsDataType, EDataType, AElementOp, BElementOp, CDEElementOp, GemmDefault, 1,
      256, 256, 128, 32, 8, 8, 32, 32, 4, 2,
      S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
      S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1,
      1, 1, S<1, 32, 1, 8>, 8>;
// clang-format on
(the "//######|" parameter-name comment table that annotates each template argument is condensed here)
int
main
(
int
argc
,
char
*
argv
[])
{
bool
do_verification
=
true
;
int
init_method
=
1
;
bool
time_kernel
=
false
;
// GEMM shape
ck
::
index_t
M
=
3840
;
ck
::
index_t
N
=
4096
;
ck
::
index_t
K
=
4096
;
ck
::
index_t
StrideA
=
4096
;
ck
::
index_t
StrideB
=
4096
;
ck
::
index_t
StrideD0
=
0
;
ck
::
index_t
StrideD1
=
4096
;
ck
::
index_t
StrideE
=
4096
;
if
(
argc
==
1
)
{
// use default case
}
else
if
(
argc
==
4
)
{
do_verification
=
std
::
stoi
(
argv
[
1
]);
init_method
=
std
::
stoi
(
argv
[
2
]);
time_kernel
=
std
::
stoi
(
argv
[
3
]);
}
else
if
(
argc
==
12
)
{
do_verification
=
std
::
stoi
(
argv
[
1
]);
init_method
=
std
::
stoi
(
argv
[
2
]);
time_kernel
=
std
::
stoi
(
argv
[
3
]);
M
=
std
::
stoi
(
argv
[
4
]);
N
=
std
::
stoi
(
argv
[
5
]);
K
=
std
::
stoi
(
argv
[
6
]);
StrideA
=
std
::
stoi
(
argv
[
7
]);
StrideB
=
std
::
stoi
(
argv
[
8
]);
StrideD0
=
std
::
stoi
(
argv
[
9
]);
StrideD1
=
std
::
stoi
(
argv
[
10
]);
StrideE
=
std
::
stoi
(
argv
[
11
]);
}
else
{
printf
(
"arg1: verification (0=no, 1=yes)
\n
"
);
printf
(
"arg2: initialization (0=no init, 1=integer value, 2=decimal value)
\n
"
);
printf
(
"arg3: time kernel (0=no, 1=yes)
\n
"
);
printf
(
"arg4 to 10: M (256x), N(128x), K(32x), StrideA, StrideB, StrideD0, StrideD1, "
"StrideE
\n
"
);
exit
(
0
);
}
auto
f_host_tensor_descriptor
=
[](
std
::
size_t
row
,
std
::
size_t
col
,
std
::
size_t
stride
,
auto
layout
)
{
if
(
std
::
is_same
<
decltype
(
layout
),
ck
::
tensor_layout
::
gemm
::
RowMajor
>::
value
)
{
return
HostTensorDescriptor
(
std
::
vector
<
std
::
size_t
>
({
row
,
col
}),
std
::
vector
<
std
::
size_t
>
({
stride
,
1
}));
}
else
{
return
HostTensorDescriptor
(
std
::
vector
<
std
::
size_t
>
({
row
,
col
}),
std
::
vector
<
std
::
size_t
>
({
1
,
stride
}));
}
};
Tensor
<
ADataType
>
a_m_k
(
f_host_tensor_descriptor
(
M
,
K
,
StrideA
,
ALayout
{}));
Tensor
<
BDataType
>
b_k_n
(
f_host_tensor_descriptor
(
K
,
N
,
StrideB
,
BLayout
{}));
Tensor
<
D0DataType
>
d0_m_n
(
f_host_tensor_descriptor
(
M
,
N
,
StrideD0
,
D0Layout
{}));
Tensor
<
D1DataType
>
d1_m_n
(
f_host_tensor_descriptor
(
M
,
N
,
StrideD1
,
D1Layout
{}));
Tensor
<
EDataType
>
e_m_n_host_result
(
f_host_tensor_descriptor
(
M
,
N
,
StrideE
,
ELayout
{}));
Tensor
<
EDataType
>
e_m_n_device_result
(
f_host_tensor_descriptor
(
M
,
N
,
StrideE
,
ELayout
{}));
std
::
cout
<<
"a_m_k: "
<<
a_m_k
.
mDesc
<<
std
::
endl
;
std
::
cout
<<
"b_k_n: "
<<
b_k_n
.
mDesc
<<
std
::
endl
;
std
::
cout
<<
"d0_m_n: "
<<
d0_m_n
.
mDesc
<<
std
::
endl
;
std
::
cout
<<
"d1_m_n: "
<<
d1_m_n
.
mDesc
<<
std
::
endl
;
std
::
cout
<<
"e_m_n: "
<<
e_m_n_host_result
.
mDesc
<<
std
::
endl
;
switch
(
init_method
)
{
case
0
:
break
;
case
1
:
a_m_k
.
GenerateTensorValue
(
GeneratorTensor_2
<
ADataType
>
{
-
5
,
5
});
b_k_n
.
GenerateTensorValue
(
GeneratorTensor_2
<
BDataType
>
{
-
5
,
5
});
d0_m_n
.
GenerateTensorValue
(
GeneratorTensor_2
<
D0DataType
>
{
-
5
,
5
});
d1_m_n
.
GenerateTensorValue
(
GeneratorTensor_2
<
D1DataType
>
{
-
5
,
5
});
break
;
default:
a_m_k
.
GenerateTensorValue
(
GeneratorTensor_3
<
ADataType
>
{
0.0
,
1.0
});
b_k_n
.
GenerateTensorValue
(
GeneratorTensor_3
<
BDataType
>
{
-
0.5
,
0.5
});
d0_m_n
.
GenerateTensorValue
(
GeneratorTensor_3
<
D0DataType
>
{
0.0
,
1.0
});
d1_m_n
.
GenerateTensorValue
(
GeneratorTensor_3
<
D1DataType
>
{
0.0
,
1.0
});
}
DeviceMem
a_m_k_device_buf
(
sizeof
(
ADataType
)
*
a_m_k
.
mDesc
.
GetElementSpace
());
DeviceMem
b_k_n_device_buf
(
sizeof
(
BDataType
)
*
b_k_n
.
mDesc
.
GetElementSpace
());
DeviceMem
d0_m_n_device_buf
(
sizeof
(
D0DataType
)
*
d0_m_n
.
mDesc
.
GetElementSpace
());
DeviceMem
d1_m_n_device_buf
(
sizeof
(
D1DataType
)
*
d1_m_n
.
mDesc
.
GetElementSpace
());
DeviceMem
e_m_n_device_buf
(
sizeof
(
EDataType
)
*
e_m_n_device_result
.
mDesc
.
GetElementSpace
());
a_m_k_device_buf
.
ToDevice
(
a_m_k
.
mData
.
data
());
b_k_n_device_buf
.
ToDevice
(
b_k_n
.
mData
.
data
());
d0_m_n_device_buf
.
ToDevice
(
d0_m_n
.
mData
.
data
());
d1_m_n_device_buf
.
ToDevice
(
d1_m_n
.
mData
.
data
());
auto
a_element_op
=
AElementOp
{};
auto
b_element_op
=
BElementOp
{};
auto
cde_element_op
=
CDEElementOp
{};
// do GEMM
auto
device_op
=
DeviceOpInstance
{};
auto
invoker
=
device_op
.
MakeInvoker
();
auto
argument
=
device_op
.
MakeArgument
(
a_m_k_device_buf
.
GetDeviceBuffer
(),
b_k_n_device_buf
.
GetDeviceBuffer
(),
std
::
array
<
const
void
*
,
2
>
{
d0_m_n_device_buf
.
GetDeviceBuffer
(),
d1_m_n_device_buf
.
GetDeviceBuffer
()},
e_m_n_device_buf
.
GetDeviceBuffer
(),
M
,
N
,
K
,
StrideA
,
StrideB
,
std
::
array
<
ck
::
index_t
,
2
>
{
StrideD0
,
StrideD1
},
StrideE
,
a_element_op
,
b_element_op
,
cde_element_op
);
if
(
!
device_op
.
IsSupportedArgument
(
argument
))
{
throw
std
::
runtime_error
(
"wrong! this device_op instance does not support this problem"
);
}
float
ave_time
=
invoker
.
Run
(
argument
,
StreamConfig
{
nullptr
,
time_kernel
});
std
::
size_t
flop
=
std
::
size_t
(
2
)
*
M
*
N
*
K
;
std
::
size_t
num_btype
=
sizeof
(
ADataType
)
*
M
*
K
+
sizeof
(
BDataType
)
*
K
*
N
+
sizeof
(
D0DataType
)
*
N
+
sizeof
(
D1DataType
)
*
M
*
N
+
sizeof
(
EDataType
)
*
M
*
N
;
float
tflops
=
static_cast
<
float
>
(
flop
)
/
1.E9
/
ave_time
;
float
gb_per_sec
=
num_btype
/
1.E6
/
ave_time
;
std
::
cout
<<
"Perf: "
<<
ave_time
<<
" ms, "
<<
tflops
<<
" TFlops, "
<<
gb_per_sec
<<
" GB/s, "
<<
device_op
.
GetTypeString
()
<<
std
::
endl
;
if
(
do_verification
)
{
Tensor
<
AccDataType
>
c_m_n
(
HostTensorDescriptor
(
std
::
vector
<
std
::
size_t
>
{
static_cast
<
std
::
size_t
>
(
M
),
static_cast
<
std
::
size_t
>
(
N
)}));
using
ReferenceGemmInstance
=
ck
::
tensor_operation
::
host
::
ReferenceGemm
<
ADataType
,
BDataType
,
AccDataType
,
AccDataType
,
AElementOp
,
BElementOp
,
PassThrough
>
;
auto
ref_gemm
=
ReferenceGemmInstance
{};
auto
ref_invoker
=
ref_gemm
.
MakeInvoker
();
auto
ref_argument
=
ref_gemm
.
MakeArgument
(
a_m_k
,
b_k_n
,
c_m_n
,
a_element_op
,
b_element_op
,
PassThrough
{});
ref_invoker
.
Run
(
ref_argument
);
for
(
int
m
=
0
;
m
<
M
;
++
m
)
{
for
(
int
n
=
0
;
n
<
N
;
++
n
)
{
cde_element_op
(
e_m_n_host_result
(
m
,
n
),
c_m_n
(
m
,
n
),
d0_m_n
(
m
,
n
),
d1_m_n
(
m
,
n
));
}
}
e_m_n_device_buf
.
FromDevice
(
e_m_n_device_result
.
mData
.
data
());
return
ck
::
utils
::
check_err
(
e_m_n_device_result
.
mData
,
e_m_n_host_result
.
mData
)
?
0
:
1
;
}
return
0
;
}
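The fused epilogue this example exercises computes, per output element, e = FastGelu(c + d0 + d1), where c is the fp32 GEMM accumulator, d0 is a bias row broadcast along M (hence StrideD0 = 0), and d1 is a full residual matrix. A minimal host-side sketch of that epilogue, assuming the common tanh-based FastGELU approximation; CK's AddAddFastGelu functor may use a slightly different polynomial:

```
// Host-side sketch of the fused epilogue: e = FastGelu(c + d0 + d1).
// The tanh-based GELU approximation below is an assumption, not necessarily
// CK's exact AddAddFastGelu definition.
#include <cmath>

float add_add_fast_gelu(float c, float d0, float d1)
{
    const float x = c + d0 + d1;
    // FastGELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    const float u = 0.7978845608f * (x + 0.044715f * x * x * x);
    return 0.5f * x * (1.0f + std::tanh(u));
}
```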
example/04_gemm_bias_relu_add/CMakeLists.txt
deleted 100644 → 0 View file @ e07b3d8e

add_example_executable(example_gemm_xdl_bias_relu_add gemm_xdl_bias_relu_add.cpp)
example/04_gemm_bias_relu_add/gemm_xdl_bias_relu_add.cpp
deleted 100644 → 0 View file @ e07b3d8e

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <stdlib.h>
#include <half.hpp>

#include "check_err.hpp"
#include "config.hpp"
#include "print.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_gemm.hpp"
#include "device_tensor.hpp"
#include "element_wise_operation.hpp"
#include "device_gemm_xdl_c_shuffle_bias_activation_add.hpp"
#include "reference_gemm_bias_activation_add.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using ADataType   = ck::half_t;
using BDataType   = ck::half_t;
using CDataType   = ck::half_t;
using AccDataType = float;

using ALayout = ck::tensor_layout::gemm::RowMajor;
using BLayout = ck::tensor_layout::gemm::ColumnMajor;
using CLayout = ck::tensor_layout::gemm::RowMajor;

using AElementOp = ck::tensor_operation::element_wise::PassThrough;
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
using CElementOp = ck::tensor_operation::element_wise::AddReluAdd;

// clang-format off
using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmXdl_C_Shuffle_Bias_Activation_Add<
    ADataType,            // ADataType
    BDataType,            // BDataType
    CDataType,            // CDataType
    AccDataType,          // AccDataType
    ALayout,              // ALayout
    BLayout,              // BLayout
    CLayout,              // CLayout
    AElementOp,           // AElementwiseOperation
    BElementOp,           // BElementwiseOperation
    CElementOp,           // CElementwiseOperation
    256,                  // BlockSize
    256,                  // MPerBlock
    128,                  // NPerBlock
    4,                    // K0PerBlock
    8,                    // K1
    32,                   // MPerXDL
    32,                   // NPerXDL
    4,                    // MXdlPerWave
    2,                    // NXdlPerWave
    S<4, 64, 1>,          // ABlockTransferThreadClusterLengths_K0_M_K1
    S<1, 0, 2>,           // ABlockTransferThreadClusterArrangeOrder
    S<1, 0, 2>,           // ABlockTransferSrcAccessOrder
    2,                    // ABlockTransferSrcVectorDim
    8,                    // ABlockTransferSrcScalarPerVector
    8,                    // ABlockTransferDstScalarPerVector_K1
    true,                 // ABlockLdsAddExtraM
    S<4, 64, 1>,          // BBlockTransferThreadClusterLengths_K0_N_K1
    S<1, 0, 2>,           // BBlockTransferThreadClusterArrangeOrder
    S<1, 0, 2>,           // BBlockTransferSrcAccessOrder
    2,                    // BBlockTransferSrcVectorDim
    8,                    // BBlockTransferSrcScalarPerVector
    8,                    // BBlockTransferDstScalarPerVector_K1
    true,                 // BBlockLdsAddExtraN
    1,                    // CShuffleMXdlPerWavePerShuffle
    1,                    // CShuffleNXdlPerWavePerShuffle
    S<1, 1, 32, 1, 1, 8>, // CBlockTransferClusterLengths_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl
    8>;                   // CBlockTransferScalarPerVector_NWaveNPerXdl
// clang-format on

using ReferenceGemmInstance = ck::tensor_operation::host::
    ReferenceGemmBiasActivationAdd<ADataType, BDataType, CDataType, AElementOp, BElementOp, CElementOp>;

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // GEMM shape
    ck::index_t M = 3840;
    ck::index_t N = 4096;
    ck::index_t K = 4096;

    ck::index_t StrideA  = 4096;
    ck::index_t StrideB  = 4096;
    ck::index_t StrideC  = 4096;
    ck::index_t StrideC1 = 4096;

    if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else if(argc == 11)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        M = std::stoi(argv[4]);
        N = std::stoi(argv[5]);
        K = std::stoi(argv[6]);

        StrideA  = std::stoi(argv[7]);
        StrideB  = std::stoi(argv[8]);
        StrideC  = std::stoi(argv[9]);
        StrideC1 = std::stoi(argv[10]);
    }
    else
    {
        printf("arg1: verification (0=no, 1=yes)\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
        printf("arg4 to 10: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC, StrideC1\n");
        exit(0);
    }

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({stride, 1}));
            }
            else
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({1, stride}));
            }
        };

    Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
    Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
    Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));

    // c0_n[n]
    Tensor<CDataType> c0_n(
        HostTensorDescriptor(std::vector<std::size_t>({static_cast<std::size_t>(N)}),
                             std::vector<std::size_t>({1})));
    // c1_m_n[m, n]
    Tensor<CDataType> c1_m_n(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));

    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
    std::cout << "c0_n: " << c0_n.mDesc << std::endl;
    std::cout << "c1_m_n: " << c1_m_n.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
        c0_n.GenerateTensorValue(GeneratorTensor_2<CDataType>{-5, 5});
        c1_m_n.GenerateTensorValue(GeneratorTensor_2<CDataType>{-5, 5});
        break;
    default:
        a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
        c0_n.GenerateTensorValue(GeneratorTensor_3<CDataType>{0.0, 1.0});
        c1_m_n.GenerateTensorValue(GeneratorTensor_3<CDataType>{0.0, 1.0});
    }

    DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpace());
    DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpace());
    DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpace());
    DeviceMem c0_n_device_buf(sizeof(CDataType) * c0_n.mDesc.GetElementSpace());
    DeviceMem c1_m_n_device_buf(sizeof(CDataType) * c1_m_n.mDesc.GetElementSpace());

    a_m_k_device_buf.ToDevice(a_m_k.mData.data());
    b_k_n_device_buf.ToDevice(b_k_n.mData.data());
    c_m_n_device_buf.ToDevice(c_m_n_device_result.mData.data());
    c0_n_device_buf.ToDevice(c0_n.mData.data());
    c1_m_n_device_buf.ToDevice(c1_m_n.mData.data());

    auto a_element_op = AElementOp{};
    auto b_element_op = BElementOp{};
    auto c_element_op = CElementOp{};

    // do GEMM
    auto gemm     = DeviceGemmInstance{};
    auto invoker  = gemm.MakeInvoker();
    auto argument = gemm.MakeArgument(static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
                                      static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
                                      static_cast<CDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
                                      static_cast<CDataType*>(c0_n_device_buf.GetDeviceBuffer()),
                                      static_cast<CDataType*>(c1_m_n_device_buf.GetDeviceBuffer()),
                                      M,
                                      N,
                                      K,
                                      StrideA,
                                      StrideB,
                                      StrideC,
                                      StrideC1,
                                      a_element_op,
                                      b_element_op,
                                      c_element_op);

    if(!gemm.IsSupportedArgument(argument))
    {
        throw std::runtime_error("wrong! device_gemm with the specified compilation parameters does "
                                 "not support this GEMM problem");
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop = std::size_t(2) * M * N * K;
    std::size_t num_btype = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
                            sizeof(CDataType) * M * N + sizeof(CDataType) * N +
                            sizeof(CDataType) * M * N;

    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s"
              << std::endl;

    c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());

    if(do_verification)
    {
        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();
        auto ref_argument = ref_gemm.MakeArgument(
            a_m_k, b_k_n, c_m_n_host_result, c0_n, c1_m_n, a_element_op, b_element_op, c_element_op);

        ref_invoker.Run(ref_argument);

        return ck::utils::check_err(c_m_n_device_result.mData, c_m_n_host_result.mData) ? 0 : 1;
    }

    return 0;
}
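The deleted example's epilogue was AddReluAdd: each output is Relu(acc + c0) + c1, with c0 a per-column bias row and c1 a residual matrix. A minimal host-side sketch of that functor, with operand roles inferred from the reference op's argument list above; treat it as an illustration rather than CK's exact definition:

```
// Host-side sketch of AddReluAdd: out = Relu(acc + c0) + c1.
// Operand roles inferred from the deleted example (c0 = bias row, c1 = residual);
// an assumption, not CK's actual functor implementation.
#include <algorithm>

float add_relu_add(float acc, float c0, float c1)
{
    return std::max(acc + c0, 0.0f) + c1;
}
```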
example/06_conv2d_fwd_bias_relu/conv2d_fwd_xdl_bias_relu.cpp
View file @ 7a3b49e5

+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
 #include <iostream>
 #include <numeric>
 #include <initializer_list>
 #include <cstdlib>
-#include <stdlib.h>
-#include <half.hpp>
-#include "check_err.hpp"
-#include "config.hpp"
-#include "conv_util.hpp"
-#include "device.hpp"
-#include "device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp"
-#include "device_tensor.hpp"
-#include "element_wise_operation.hpp"
-#include "host_tensor.hpp"
-#include "host_tensor_generator.hpp"
-#include "reference_conv_fwd_bias_activation.hpp"
-#include "tensor_layout.hpp"
+#include "ck/ck.hpp"
+#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
+#include "ck/tensor_operation/gpu/device/device_conv2d_fwd_xdl_c_shuffle_bias_activation_nhwc_kyxc_nhwk.hpp"
+#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+#include "ck/library/utility/check_err.hpp"
+#include "ck/library/utility/conv_util.hpp"
+#include "ck/library/host_tensor/device_memory.hpp"
+#include "ck/library/host_tensor/host_tensor.hpp"
+#include "ck/library/host_tensor/host_tensor_generator.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd_bias_activation.hpp"
 namespace {
 ...
example/07_conv2d_fwd_bias_relu_add/conv2d_fwd_xdl_bias_relu_add.cpp
View file @ 7a3b49e5

+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
 #include <iostream>
 #include <numeric>
 #include <initializer_list>
 #include <cstdlib>
-#include <stdlib.h>
-#include <half.hpp>
-#include "check_err.hpp"
-#include "config.hpp"
-#include "conv_util.hpp"
-#include "device.hpp"
-#include "device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp"
-#include "device_tensor.hpp"
-#include "element_wise_operation.hpp"
-#include "host_tensor.hpp"
-#include "host_tensor_generator.hpp"
-#include "reference_conv_fwd_bias_activation_add.hpp"
-#include "tensor_layout.hpp"
+#include "ck/ck.hpp"
+#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
+#include "ck/tensor_operation/gpu/device/device_conv2d_fwd_xdl_c_shuffle_bias_activation_add_nhwc_kyxc_nhwk.hpp"
+#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+#include "ck/library/utility/check_err.hpp"
+#include "ck/library/utility/conv_util.hpp"
+#include "ck/library/host_tensor/device_memory.hpp"
+#include "ck/library/host_tensor/host_tensor.hpp"
+#include "ck/library/host_tensor/host_tensor_generator.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd_bias_activation_add.hpp"
 namespace {
 ...
@@ -224,10 +225,10 @@ int main(int argc, char* argv[])
     {
     case 0: break;
     case 1:
-        input.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
-        weights.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
-        bias.GenerateTensorValue(GeneratorTensor_2<OutDataType>{-5, 5});
-        residual.GenerateTensorValue(GeneratorTensor_2<OutDataType>{-5, 5});
+        input.GenerateTensorValue(GeneratorTensor_2<InDataType>{-2, 2});
+        weights.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-2, 2});
+        bias.GenerateTensorValue(GeneratorTensor_2<OutDataType>{-2, 2});
+        residual.GenerateTensorValue(GeneratorTensor_2<OutDataType>{-2, 2});
         break;
     default:
         input.GenerateTensorValue(GeneratorTensor_3<InDataType>{0.0, 1.0});
 ...
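The only functional change in this hunk narrows the random integer initialization from {-5, 5} to {-2, 2}. A plausible reading (an assumption; the commit does not state it) is that smaller operand magnitudes keep the fp16 conv + bias + relu + add accumulation in a range where host/device verification stays tight. A sketch of the generator semantics as used here, assuming GeneratorTensor_2{min, max} draws uniform integers in [min, max):

```
// Hypothetical stand-in for GeneratorTensor_2, assuming it draws uniform
// integers in [min_value, max_value) -- not CK's actual implementation.
#include <cstdlib>

struct GeneratorTensor2Sketch
{
    int min_value; // inclusive
    int max_value; // exclusive

    template <typename... Is>
    float operator()(Is...) const
    {
        return static_cast<float>(min_value + std::rand() % (max_value - min_value));
    }
};
```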