Project: gaoqiong/composable_kernel_ROCM
Commit: f21cda25 (unverified), authored Oct 14, 2024 by Bartłomiej Kocot, committed via GitHub Oct 14, 2024
Parent: 35c1777d

Add transpose scale amax example (#1547)

* Add transpose scale amax example
* fixes
* Tune reduce instance
Changes: 4 changed files, with 258 additions and 0 deletions (+258 -0)

- example/44_elementwise_permute/CMakeLists.txt (+1 -0)
- example/44_elementwise_permute/elementwise_scale_permute_amax_2D_fp16_fp8.cpp (+247 -0)
- include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp (+6 -0)
- include/ck/utility/math_v2.hpp (+4 -0)
example/44_elementwise_permute/CMakeLists.txt

@@ -5,3 +5,4 @@ add_example_executable(example_elementwise_permute_4D_fp32_col elementwise_permu
 add_example_executable(example_elementwise_permute_4D_fp16_col elementwise_permute_4D_fp16_col.cpp)
 add_example_executable(example_elementwise_binary_4D_fp16 elementwise_binary_4D_fp16.cpp)
 add_example_executable(example_elementwise_trinary_4D_fp16 elementwise_trinary_4D_fp16.cpp)
+add_example_executable(elementwise_scale_permute_amax_2D_fp16_fp8 elementwise_scale_permute_amax_2D_fp16_fp8.cpp)
example/44_elementwise_permute/elementwise_scale_permute_amax_2D_fp16_fp8.cpp (new file, mode 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#include <iostream>
#include <cstdlib>
#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/element/binary_element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_elementwise_dynamic_vector_dims_impl.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_reduce_multiblock.hpp"
#include "ck/tensor_operation/gpu/device/reduction_operator_mapping.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_elementwise.hpp"
#include "ck/library/utility/algorithm.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/utility/reduction_enums.hpp"

using F16 = ck::half_t;
using F32 = float;
using F8  = ck::f8_t;

using InputDataType  = F16;
using ScaleDataType  = F32;
using OutputDataType = F8;

static constexpr ck::index_t NumDim = 2;

constexpr ck::ReduceTensorOp ReduceOpId = ck::ReduceTensorOp::MAX;
constexpr bool PropagateNan             = true;
constexpr bool OutputIndex              = false;

using ReduceOperation = typename ck::reduce_binary_operator<ReduceOpId>::opType;
struct ScalePassThrough
{
    ScalePassThrough(const float alpha = 1.f) : alpha_(alpha) {}

    __host__ __device__ constexpr void
    operator()(OutputDataType& y0, OutputDataType& y1, const InputDataType& x0) const
    {
        y0 = ck::type_convert<OutputDataType>(ck::type_convert<ScaleDataType>(x0) * alpha_);
        y1 = y0;
    }

    const ScaleDataType alpha_;
};
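Note that ScalePassThrough writes the same scaled, fp8-cast value to both outputs; the transpose comes entirely from the strides of the first output tensor, not from the elementwise operator itself. A minimal standalone sketch (illustrative only, not CK code) of how {1, M} output strides realize the transpose:

// Standalone illustration: writing a row-major MxK source through
// column-major strides {1, M} produces the transposed layout in memory.
#include <cstdio>
#include <vector>

int main()
{
    const int M = 2, K = 3;
    std::vector<float> in  = {0, 1, 2, 3, 4, 5}; // MxK, strides {K, 1}
    std::vector<float> out(M * K);               // same shape, strides {1, M}
    for(int m = 0; m < M; ++m)
        for(int k = 0; k < K; ++k)
            out[m * 1 + k * M] = in[m * K + k];
    for(int i = 0; i < M * K; ++i)
        std::printf("%g ", out[i]); // prints 0 3 1 4 2 5: the transpose
    return 0;
}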
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using UnaryAbs    = ck::tensor_operation::element_wise::UnaryAbs;

using DeviceElementwisePermuteInstance =
    ck::tensor_operation::device::DeviceElementwiseImpl<
        ck::Tuple<InputDataType>,                  // InDataTypeTuple
        ck::Tuple<OutputDataType, OutputDataType>, // OutDataTypeTuple
        ScalePassThrough,                          // Elementwise
        NumDim,                                    // NumDim
        256,                                       // BlockSize
        128,                                       // M0PerBlock
        128,                                       // M1PerBlock
        8,                                         // M0PerThread
        8,                                         // M1PerThread
        ck::Sequence<1, 0>,                        // ThreadClusterArrangeOrder
        ck::Sequence<8>,                           // InScalarPerVectorSeq
        ck::Sequence<8, 1>>;                       // OutScalarPerVectorSeq

using DeviceReduceInstance =
    ck::tensor_operation::device::DeviceReduceMultiBlock<
        OutputDataType,
        OutputDataType,
        OutputDataType,
        NumDim,
        NumDim,
        ReduceOperation,
        UnaryAbs,
        PassThrough,
        ck::InMemoryDataOperationEnum::Set,
        PropagateNan,
        OutputIndex,
        false, // HaveIndexInputIfOutputIndex
        1024,  // BlockSize
        1,     // MThreadClusterSize
        1024,  // KThreadClusterSize
        1,     // MThreadSliceSize
        16,    // KThreadSliceSize
        1,     // InSrcVectorDim
        16,    // InSrcVectorSize
        1>;    // OutDstVectorSize
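The example therefore runs two device passes: the DeviceElementwiseImpl instance performs the fused scale, cast, and permute, and the DeviceReduceMultiBlock instance then reduces both dimensions of the fp8 output (reduce dims {0, 1} below) through UnaryAbs and MAX to produce the single-element amax.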
void reference_scale_permute_amax(Tensor<InputDataType>& input,
                                  Tensor<OutputDataType>& host_output_scaled_casted_transposed,
                                  Tensor<OutputDataType>& host_output_scaled_casted,
                                  Tensor<OutputDataType>& host_output_amax,
                                  const float scale)
{
    ScalePassThrough out_element_op(scale);
    const ck::index_t M = input.GetLengths()[0];
    const ck::index_t K = input.GetLengths()[1];
    for(ck::index_t m = 0; m < M; m++)
    {
        for(ck::index_t k = 0; k < K; k++)
        {
            OutputDataType y0, y1;
            out_element_op(y0, y1, input(m, k));
            host_output_scaled_casted(m, k)            = y0;
            host_output_scaled_casted_transposed(m, k) = y1;
            const OutputDataType y_fabs =
                ck::type_convert<OutputDataType>(ck::math::abs(ck::type_convert<float>(y0)));
            host_output_amax(0) = ck::math::max(y_fabs, host_output_amax(0));
        }
    }
}
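Worth noting: the reference computes amax from the already-cast fp8 values (|y0| rounded through float), which mirrors the device pipeline above; the reduction runs over the fp8 elementwise output rather than the higher-precision fp32 intermediate.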
int main(int argc, char* argv[])
{
    bool do_verification = true;
    bool time_kernel     = true;

    const float scale = 2.f;

    ck::index_t M = 1024;
    ck::index_t K = 1024;

    if(argc == 3)
    {
        M = std::stoi(argv[1]);
        K = std::stoi(argv[2]);
    }

    std::array<ck::index_t, 2> dims        = {M, K};
    std::array<ck::index_t, 2> in_strides  = {K, 1};
    std::array<ck::index_t, 2> out_strides = {1, M};

    Tensor<InputDataType> input(dims, in_strides);
    Tensor<OutputDataType> output_scaled_casted_transposed(dims, out_strides);
    Tensor<OutputDataType> output_scaled_casted(dims, in_strides);
    Tensor<OutputDataType> output_amax({1});

    input.GenerateTensorValue(GeneratorTensor_3<InputDataType>{0.0, 1.0});

    DeviceMem input_dev_buf(sizeof(InputDataType) * input.mDesc.GetElementSpaceSize());
    DeviceMem output_scaled_casted_transposed_dev_buf(
        sizeof(OutputDataType) * output_scaled_casted_transposed.mDesc.GetElementSpaceSize());
    DeviceMem output_scaled_casted_dev_buf(sizeof(OutputDataType) *
                                           output_scaled_casted.mDesc.GetElementSpaceSize());
    DeviceMem output_amax_dev_buf(sizeof(OutputDataType) *
                                  output_amax.mDesc.GetElementSpaceSize());

    input_dev_buf.ToDevice(input.mData.data());

    std::array<const void*, 1> inputs = {input_dev_buf.GetDeviceBuffer()};
    std::array<void*, 2> outputs = {output_scaled_casted_transposed_dev_buf.GetDeviceBuffer(),
                                    output_scaled_casted_dev_buf.GetDeviceBuffer()};

    std::cout << "Input: " << input.mDesc << std::endl;
    std::cout << "Scale: " << scale << std::endl;
    std::cout << "Output scaled casted transposed: " << output_scaled_casted_transposed.mDesc
              << std::endl;
    std::cout << "Output scaled casted: " << output_scaled_casted.mDesc << std::endl;
    std::cout << "Output amax: " << output_amax.mDesc << std::endl;

    auto launch_transpose_scale = [&]() {
        auto transposeScale = DeviceElementwisePermuteInstance{};
        auto argument       = transposeScale.MakeArgumentPointer(dims,
                                                                 {in_strides},
                                                                 {out_strides, in_strides},
                                                                 inputs,
                                                                 outputs,
                                                                 ScalePassThrough{scale});

        if(!transposeScale.IsSupportedArgument(argument.get()))
        {
            throw std::runtime_error(
                "The runtime parameters are not supported by the device instance, exiting!");
        };

        auto transposeScale_invoker_ptr = transposeScale.MakeInvokerPointer();
        return transposeScale_invoker_ptr->Run(argument.get(), StreamConfig{nullptr, time_kernel});
    };

    auto launch_reduce = [&]() {
        auto reduce = DeviceReduceInstance{};
        auto reduce_argument_ptr =
            reduce.MakeArgumentPointer(dims,
                                       in_strides,
                                       {1},    // Output Lengths
                                       {1},    // Output Strides
                                       {0, 1}, // Reduce Dims
                                       static_cast<double>(1.f),
                                       static_cast<double>(0.f),
                                       output_scaled_casted_dev_buf.GetDeviceBuffer(),
                                       nullptr,
                                       output_amax_dev_buf.GetDeviceBuffer(),
                                       nullptr,
                                       UnaryAbs{},
                                       PassThrough{});

        if(!reduce.IsSupportedArgument(reduce_argument_ptr.get()))
        {
            throw std::runtime_error(
                "The runtime parameters are not supported by the device instance, exiting!");
        };

        auto invoker_ptr = reduce.MakeInvokerPointer();
        return invoker_ptr->Run(reduce_argument_ptr.get(), StreamConfig{nullptr, time_kernel});
    };

    float ave_time = launch_transpose_scale();
    ave_time += launch_reduce();

    std::cout << "Perf: " << ave_time << " ms" << std::endl;

    bool pass = true;
    if(do_verification)
    {
        Tensor<OutputDataType> host_output_scaled_casted_transposed(dims, out_strides);
        Tensor<OutputDataType> host_output_scaled_casted(dims, in_strides);
        Tensor<OutputDataType> host_output_amax({1});

        reference_scale_permute_amax(input,
                                     host_output_scaled_casted_transposed,
                                     host_output_scaled_casted,
                                     host_output_amax,
                                     scale);

        output_scaled_casted_transposed_dev_buf.FromDevice(
            output_scaled_casted_transposed.mData.data());
        output_scaled_casted_dev_buf.FromDevice(output_scaled_casted.mData.data());
        output_amax_dev_buf.FromDevice(output_amax.mData.data());

        pass &= ck::utils::check_err(output_scaled_casted_transposed.mData,
                                     host_output_scaled_casted_transposed.mData,
                                     "Error: Incorrect results scaled transposed",
                                     1e-3,
                                     1e-3);
        pass &= ck::utils::check_err(output_scaled_casted.mData,
                                     host_output_scaled_casted.mData,
                                     "Error: Incorrect results scaled",
                                     1e-3,
                                     1e-3);
        pass &= ck::utils::check_err(
            output_amax.mData, host_output_amax.mData, "Error: Incorrect results amax", 1e-3, 1e-3);
    }

    return pass ? 0 : 1;
}
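Usage note: the example defaults to M = K = 1024 and, when given exactly two command-line arguments, reads them as M and K, e.g. `elementwise_scale_permute_amax_2D_fp16_fp8 2048 512` (assuming the binary takes the target name registered in the CMakeLists.txt change above).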
include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp

@@ -419,6 +419,12 @@ struct UnaryAbs
         y = ck::math::abs(x);
     };
+
+    template <>
+    __host__ __device__ void operator()(f8_t& y, const f8_t& x) const
+    {
+        y = ck::type_convert<f8_t>(ck::math::abs(ck::type_convert<float>(x)));
+    };
 };

 struct UnarySqrt
...
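The f8_t specialization presumably routes through float because f8_t carries no native arithmetic: the value is widened with ck::type_convert, the absolute value is taken in float, and the result is converted back to f8_t.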
include/ck/utility/math_v2.hpp

@@ -80,6 +80,8 @@ static inline __host__ bool isnan(half_t x)
     return (xx & 0x7FFF) > 0x7C00;
 };

+static inline __host__ bool isnan(f8_t x) { return (x & 0x80); };
+
 #ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
 static inline __host__ bool isnan(int4_t x)
 {
...
@@ -529,6 +531,8 @@ static inline __device__ bool isnan(half_t x)
     return (xx & 0x7FFF) > 0x7C00;
 };

+static inline __device__ bool isnan(f8_t x) { return (x & 0x80); };
+
 static inline __device__ half_t sqrt(half_t x)
 {
     return static_cast<half_t>(__builtin_amdgcn_sqrtf(static_cast<float>(x)));
...
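A hedged aside on the encoding: f8_t here appears to use a fnuz-style fp8 format, in which 0x80 (only the sign bit set, with no negative zero) is the sole NaN bit pattern, which is what the new isnan overloads key off via the sign-bit mask.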