gaoqiong / composable_kernel · Commit b7f500f0

Authored Nov 28, 2022 by rocking5566, committed by rocking, Nov 28, 2022

Merge branch 'develop' into gemm_layernorm_welford

Parents: 694057a7, 4e6a5575

Changes: 26

Showing 6 changed files with 794 additions and 1 deletion
  profiler/include/profile_batchnorm_forward_impl.hpp   +440  -0
  profiler/src/profile_batchnorm_fwd.cpp                 +234  -0
  profiler/src/profiler.cpp                                +7  -1
  test/CMakeLists.txt                                      +1  -0
  test/batchnorm_fwd/CMakeLists.txt                        +2  -0
  test/batchnorm_fwd/batchnorm_fwd_rank_4.cpp            +110  -0
profiler/include/profile_batchnorm_forward_impl.hpp  (new file, mode 0 → 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iomanip>
#include <stdexcept>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/tensor_operation_instance/gpu/batchnorm_forward.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batchnorm_forward.hpp"

namespace ck {
namespace profiler {

template <typename XDataType,
          typename YDataType,
          typename AccDataType,
          typename ScaleDataType,
          typename BiasDataType,
          typename MeanVarDataType,
          index_t Rank,
          index_t NumBatchNormReduceDim>
bool profile_batchnorm_forward_impl(int do_verification,
                                    int init_method,
                                    bool do_dumpout,
                                    bool time_kernel,
                                    const std::vector<size_t> inOutLengths,
                                    const std::vector<int> reduceDims,
                                    bool updateMovingAverage,
                                    bool saveMeanAndInvVariance,
                                    double epsilon,
                                    double averageFactor)
{
    if(inOutLengths.size() != Rank || reduceDims.size() != NumBatchNormReduceDim)
    {
        throw std::runtime_error("Invalid tensor lengths or number of reduce dimensions!");
    };
    std::vector<size_t> scaleBiasMeanVarLengths;

    // used for calculating the effective transferred bytes by each operation
    size_t total_length;
    size_t invariant_length = 1;

    total_length =
        std::accumulate(inOutLengths.begin(), inOutLengths.end(), 1, std::multiplies<size_t>{});

    if(std::any_of(reduceDims.begin(), reduceDims.end(), [](int d) { return d < 0 || d >= Rank; }))
        throw std::runtime_error("Invalid reduce dimensions!");

    for(int dim = 0; dim < Rank; dim++)
    {
        if(std::none_of(reduceDims.begin(), reduceDims.end(), [&](int d) { return dim == d; }))
        {
            scaleBiasMeanVarLengths.push_back(inOutLengths[dim]);
            invariant_length *= inOutLengths[dim];
        };
    }

    // input data of the batchnorm forward algorithm
    Tensor<XDataType> x(inOutLengths);
    Tensor<ScaleDataType> bnScale(scaleBiasMeanVarLengths);
    Tensor<BiasDataType> bnBias(scaleBiasMeanVarLengths);

    // output data of the batchnorm forward algorithm
    Tensor<YDataType> y_ref(inOutLengths);
    Tensor<YDataType> y(inOutLengths);

    Tensor<MeanVarDataType> resultSaveMean_ref(scaleBiasMeanVarLengths);
    Tensor<MeanVarDataType> resultSaveInvVariance_ref(scaleBiasMeanVarLengths);

    Tensor<MeanVarDataType> resultRunningMean_ref(scaleBiasMeanVarLengths);
    Tensor<MeanVarDataType> resultRunningVariance_ref(scaleBiasMeanVarLengths);

    auto inOutStrides            = x.mDesc.GetStrides();
    auto scaleBiasMeanVarStrides = bnScale.mDesc.GetStrides();

    std::size_t num_thread = std::thread::hardware_concurrency();

    if(updateMovingAverage)
    {
        if constexpr(ck::is_same_v<XDataType, int8_t>)
        {
            x.GenerateTensorValue(GeneratorTensor_2<XDataType>{-5, 5}, num_thread);

            const float x_mean       = 0.0f;
            const float x_stddev     = 2.5f;
            const float noise_stddev = 0.04f;

            resultRunningMean_ref.GenerateTensorValue(
                GeneratorTensor_4<MeanVarDataType>{x_mean, noise_stddev}, num_thread);

            resultRunningVariance_ref.GenerateTensorValue(
                GeneratorTensor_4<MeanVarDataType>{x_stddev * x_stddev, noise_stddev}, num_thread);
        }
        else
        {
            const float x_mean       = 0.0f;
            const float x_stddev     = 1.0f;
            const float noise_stddev = 0.04f;

            // input data in normal distribution
            x.GenerateTensorValue(GeneratorTensor_4<XDataType>{x_mean, x_stddev}, num_thread);

            // initialize the runningMean to be values with tiny variation to the mean of the x
            // values
            resultRunningMean_ref.GenerateTensorValue(
                GeneratorTensor_4<MeanVarDataType>{x_mean, noise_stddev}, num_thread);

            // initialize the runningVariance to be values with tiny variation to the variance of
            // the x values
            resultRunningVariance_ref.GenerateTensorValue(
                GeneratorTensor_4<MeanVarDataType>{x_stddev * x_stddev, noise_stddev}, num_thread);
        };
    }
    else
    {
        if constexpr(ck::is_same_v<XDataType, int8_t>)
            x.GenerateTensorValue(GeneratorTensor_2<XDataType>{-5, 5}, num_thread);
        else
            x.GenerateTensorValue(GeneratorTensor_3<XDataType>{-1.0f, 1.0f}, num_thread);
    };

    if(do_verification)
    {
        if constexpr(ck::is_same_v<ScaleDataType, int8_t> && ck::is_same_v<BiasDataType, int8_t>)
        {
            bnScale.GenerateTensorValue(GeneratorTensor_2<ScaleDataType>{-5, 5}, num_thread);
            bnBias.GenerateTensorValue(GeneratorTensor_2<BiasDataType>{-5, 5}, num_thread);
        }
        else
        {
            switch(init_method)
            {
            case 0:
                bnScale.GenerateTensorValue(GeneratorTensor_0<ScaleDataType>{}, num_thread);
                bnBias.GenerateTensorValue(GeneratorTensor_0<BiasDataType>{}, num_thread);
                break;
            case 1:
                bnScale.GenerateTensorValue(GeneratorTensor_1<ScaleDataType>{1}, num_thread);
                bnBias.GenerateTensorValue(GeneratorTensor_1<BiasDataType>{0}, num_thread);
                break;
            case 2:
                bnScale.GenerateTensorValue(GeneratorTensor_2<ScaleDataType>{-5, 5}, num_thread);
                bnBias.GenerateTensorValue(GeneratorTensor_2<BiasDataType>{-5, 5}, num_thread);
                break;
            default:
                bnScale.GenerateTensorValue(GeneratorTensor_3<ScaleDataType>{-1.0f, 1.0f}, num_thread);
                bnBias.GenerateTensorValue(GeneratorTensor_3<BiasDataType>{-1.0f, 1.0f}, num_thread);
            }
        };
    };
    // these buffers are usually provided by the user application
    DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
    DeviceMem y_dev(sizeof(XDataType) * y.mDesc.GetElementSpaceSize());

    DeviceMem bnScale_dev(sizeof(ScaleDataType) * bnScale.mDesc.GetElementSpaceSize());
    DeviceMem bnBias_dev(sizeof(BiasDataType) * bnBias.mDesc.GetElementSpaceSize());

    // mean_dev or resultSaveMean_dev
    DeviceMem resultSaveMean_dev(sizeof(MeanVarDataType) *
                                 resultSaveMean_ref.mDesc.GetElementSpaceSize());
    // meansquare_dev or resultSaveInvVariance_dev
    DeviceMem resultSaveInvVariance_dev(sizeof(MeanVarDataType) *
                                        resultSaveInvVariance_ref.mDesc.GetElementSpaceSize());
    // resultRunningMean_dev
    DeviceMem resultRunningMean_dev(sizeof(MeanVarDataType) *
                                    resultRunningMean_ref.mDesc.GetElementSpaceSize());
    // resultRunningVariance_dev
    DeviceMem resultRunningVariance_dev(sizeof(MeanVarDataType) *
                                        resultRunningVariance_ref.mDesc.GetElementSpaceSize());

    x_dev.ToDevice(x.mData.data());
    bnScale_dev.ToDevice(bnScale.mData.data());
    bnBias_dev.ToDevice(bnBias.mData.data());

    if(updateMovingAverage)
    {
        resultRunningMean_dev.ToDevice(resultRunningMean_ref.mData.data());
        resultRunningVariance_dev.ToDevice(resultRunningVariance_ref.mData.data());
    };

    // used for storing the device result for verification when updateMovingAverage is enabled
    Tensor<MeanVarDataType> resultRunningMean(scaleBiasMeanVarLengths);
    Tensor<MeanVarDataType> resultRunningVariance(scaleBiasMeanVarLengths);

    // used for storing the device result for verification when saveMeanAndInvVariance is enabled
    Tensor<MeanVarDataType> resultSaveMean(scaleBiasMeanVarLengths);
    Tensor<MeanVarDataType> resultSaveInvVariance(scaleBiasMeanVarLengths);

    std::array<index_t, Rank> arrInOutLengths;
    std::array<index_t, Rank> arrInOutStrides;
    std::array<index_t, Rank - NumBatchNormReduceDim> arrScaleBiasMeanVarLengths;
    std::array<index_t, Rank - NumBatchNormReduceDim> arrScaleBiasMeanVarStrides;
    std::array<int, NumBatchNormReduceDim> arrReduceDims;

    std::copy(inOutLengths.begin(), inOutLengths.end(), arrInOutLengths.begin());
    std::copy(inOutStrides.begin(), inOutStrides.end(), arrInOutStrides.begin());
    std::copy(scaleBiasMeanVarLengths.begin(),
              scaleBiasMeanVarLengths.end(),
              arrScaleBiasMeanVarLengths.begin());
    std::copy(scaleBiasMeanVarStrides.begin(),
              scaleBiasMeanVarStrides.end(),
              arrScaleBiasMeanVarStrides.begin());
    std::copy(reduceDims.begin(), reduceDims.end(), arrReduceDims.begin());

    using PassThroughOp = ck::tensor_operation::element_wise::PassThrough;

    // add device batchnorm-forward instances
    using DeviceOp = ck::tensor_operation::device::DeviceBatchNormFwd<XDataType,
                                                                      YDataType,
                                                                      AccDataType,
                                                                      ScaleDataType,
                                                                      BiasDataType,
                                                                      MeanVarDataType,
                                                                      PassThroughOp,
                                                                      Rank,
                                                                      NumBatchNormReduceDim>;

    // get device op instances
    const auto instance_ptrs =
        ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
            DeviceOp>::GetInstances();

    std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;

    std::string best_instance_name;
    float best_avg_time   = std::numeric_limits<float>::max();
    float best_gb_per_sec = 0;

    if(do_verification)
    {
        using ReferenceBatchNormFwdInstance =
            ck::tensor_operation::host::ReferenceBatchNormFwd<XDataType,
                                                              YDataType,
                                                              AccDataType,
                                                              ScaleDataType,
                                                              BiasDataType,
                                                              MeanVarDataType,
                                                              PassThroughOp,
                                                              Rank,
                                                              NumBatchNormReduceDim>;

        auto batchNormFwd_ref = ReferenceBatchNormFwdInstance{};

        auto argument_ptr_ref = batchNormFwd_ref.MakeArgumentPointer(
            arrInOutLengths,
            arrInOutStrides,
            arrInOutStrides,
            arrReduceDims,
            arrScaleBiasMeanVarLengths,
            arrScaleBiasMeanVarStrides,
            arrScaleBiasMeanVarStrides,
            arrScaleBiasMeanVarStrides,
            x.mData.data(),
            bnScale.mData.data(),
            bnBias.mData.data(),
            epsilon,
            PassThroughOp{},
            y_ref.mData.data(),
            saveMeanAndInvVariance ? resultSaveMean_ref.mData.data() : nullptr,
            saveMeanAndInvVariance ? resultSaveInvVariance_ref.mData.data() : nullptr,
            averageFactor,
            updateMovingAverage ? resultRunningMean_ref.mData.data() : nullptr,
            updateMovingAverage ? resultRunningVariance_ref.mData.data() : nullptr);

        if(!batchNormFwd_ref.IsSupportedArgument(argument_ptr_ref.get()))
        {
            std::cout << "The runtime parameters are not supported by the reference instance, exiting!"
                      << std::endl;

            return (false);
        };

        auto invoker_ptr_ref = batchNormFwd_ref.MakeInvokerPointer();

        (void)invoker_ptr_ref->Run(argument_ptr_ref.get());
    }
    int num_kernel = 0;
    bool pass      = true;

    for(auto& inst_ptr : instance_ptrs)
    {
        auto argument_ptr = inst_ptr->MakeArgumentPointer(
            arrInOutLengths,
            arrInOutStrides,
            arrInOutStrides,
            arrReduceDims,
            arrScaleBiasMeanVarLengths,
            arrScaleBiasMeanVarStrides,
            arrScaleBiasMeanVarStrides,
            arrScaleBiasMeanVarStrides,
            x_dev.GetDeviceBuffer(),
            bnScale_dev.GetDeviceBuffer(),
            bnBias_dev.GetDeviceBuffer(),
            epsilon,
            PassThroughOp{},
            y_dev.GetDeviceBuffer(),
            saveMeanAndInvVariance ? resultSaveMean_dev.GetDeviceBuffer() : nullptr,
            saveMeanAndInvVariance ? resultSaveInvVariance_dev.GetDeviceBuffer() : nullptr,
            averageFactor,
            updateMovingAverage ? resultRunningMean_dev.GetDeviceBuffer() : nullptr,
            updateMovingAverage ? resultRunningVariance_dev.GetDeviceBuffer() : nullptr);

        if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            num_kernel++;
        }
        else
        {
            if(time_kernel)
            {
                std::cout << inst_ptr->GetTypeString()
                          << " skipped due to unsupported argument: " << std::endl;
            }

            continue;
        };

        size_t workspace_sz = inst_ptr->GetWorkSpaceSize(argument_ptr.get());

        DeviceMem workspace_dev(workspace_sz);

        inst_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());

        auto invoker_ptr = inst_ptr->MakeInvokerPointer();

        float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

        size_t num_bytes = 0;

        // input of x, scale, bias; output of y
        num_bytes += total_length * (sizeof(XDataType) + sizeof(YDataType)) +
                     invariant_length * (sizeof(ScaleDataType) + sizeof(BiasDataType));

        // output of mean, inv-variance
        num_bytes += saveMeanAndInvVariance ? invariant_length * sizeof(MeanVarDataType) * 2 : 0;

        // updating of moving mean, variance
        num_bytes += updateMovingAverage ? invariant_length * sizeof(MeanVarDataType) * 4 : 0;

        float gb_per_sec = num_bytes / 1.E6 / avg_time;

        if(time_kernel)
            std::cout << "Perf: " << avg_time << " ms, " << gb_per_sec << " GB/s, "
                      << inst_ptr->GetTypeString() << std::endl;

        if(avg_time < best_avg_time)
        {
            best_instance_name = inst_ptr->GetTypeString();
            best_avg_time      = avg_time;
            best_gb_per_sec    = gb_per_sec;
        }
        if(do_verification)
        {
            using ck::utils::check_err;

            bool single_pass;

            y_dev.FromDevice(y.mData.data());

            if constexpr(ck::is_same_v<YDataType, ck::bhalf_t>)
                single_pass = check_err(y.mData, y_ref.mData, "y results", 1e-2, 1e-2);
            else
                single_pass = check_err(y.mData, y_ref.mData, "y results", 4e-3, 4e-3);

            if(updateMovingAverage)
            {
                resultRunningMean_dev.FromDevice(resultRunningMean.mData.data());
                resultRunningVariance_dev.FromDevice(resultRunningVariance.mData.data());

                // clang-format off
                single_pass = single_pass && check_err(resultRunningMean.mData, resultRunningMean_ref.mData, "average mean results", 1.5e-5, 1.5e-5);
                single_pass = single_pass && check_err(resultRunningVariance.mData, resultRunningVariance_ref.mData, "average variance results", 1e-5, 1e-5);
                // clang-format on
            };

            if(saveMeanAndInvVariance)
            {
                resultSaveMean_dev.FromDevice(resultSaveMean.mData.data());
                resultSaveInvVariance_dev.FromDevice(resultSaveInvVariance.mData.data());

                // clang-format off
                single_pass = single_pass && check_err(resultSaveMean.mData, resultSaveMean_ref.mData, "mean results", 3e-5, 3e-5);
                single_pass = single_pass && check_err(resultSaveInvVariance.mData, resultSaveInvVariance_ref.mData, "inv-variance results", 7e-5, 7e-5);
                // clang-format on
            };

            pass = pass && single_pass;
        };

        if(do_dumpout)
        {
            using ck::host_common::dumpBufferToFile;

            // clang-format off
            dumpBufferToFile("dump_x.bin", x.mData.data(), x.mDesc.GetElementSize());
            dumpBufferToFile("dump_y.bin", y.mData.data(), y.mDesc.GetElementSize());
            dumpBufferToFile("dump_y_ref.bin", y_ref.mData.data(), y_ref.mDesc.GetElementSize());
            // clang-format on

            if(saveMeanAndInvVariance)
            {
                // clang-format off
                dumpBufferToFile("dump_mean.bin", resultSaveMean.mData.data(), resultSaveMean.mDesc.GetElementSize());
                dumpBufferToFile("dump_mean_ref.bin", resultSaveMean_ref.mData.data(), resultSaveMean_ref.mDesc.GetElementSize());
                dumpBufferToFile("dump_invvar.bin", resultSaveInvVariance.mData.data(), resultSaveInvVariance.mDesc.GetElementSize());
                dumpBufferToFile("dump_invvar_ref.bin", resultSaveInvVariance_ref.mData.data(), resultSaveInvVariance_ref.mDesc.GetElementSize());
                // clang-format on
            };
        };
    }

    if(time_kernel)
    {
        std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s, "
                  << best_instance_name << std::endl;
    }

    if(num_kernel == 0)
    {
        std::cout << "Error: No kernel is applicable" << std::endl;
        return false;
    }

    return pass;
}

} // namespace profiler
} // namespace ck
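Editor's note: in the implementation above, every dimension that is not listed in reduceDims contributes a scale/bias/mean/variance length, and the product of those kept dimensions (invariant_length) is what the bandwidth estimate multiplies the per-element sizes by. The following minimal standalone sketch (an editor's illustration, not part of this commit) reproduces just that selection step for an NHWC tensor reduced over N, H and W:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        const std::vector<std::size_t> inOutLengths = {128, 16, 6, 512}; // NHWC
        const std::vector<int> reduceDims           = {0, 1, 2};         // reduce over N, H, W

        std::vector<std::size_t> scaleBiasMeanVarLengths;
        std::size_t invariant_length = 1;

        for(int dim = 0; dim < static_cast<int>(inOutLengths.size()); ++dim)
        {
            // keep a dimension only if it is not being reduced
            if(std::find(reduceDims.begin(), reduceDims.end(), dim) == reduceDims.end())
            {
                scaleBiasMeanVarLengths.push_back(inOutLengths[dim]);
                invariant_length *= inOutLengths[dim];
            }
        }

        // for this NHWC example: scaleBiasMeanVarLengths = {512}, invariant_length = 512
        std::cout << "invariant_length = " << invariant_length << std::endl;
        return 0;
    }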
profiler/src/profile_batchnorm_fwd.cpp  (new file, mode 0 → 100644)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <vector>
#include <getopt.h>

#include "ck/library/utility/host_common_util.hpp"
#include "profiler/include/profile_batchnorm_forward_impl.hpp"

using ck::index_t;
using namespace std;

static const struct option long_options[] = {{"inOutLengths", required_argument, nullptr, 'D'},
                                             {"reduceDims", required_argument, nullptr, 'R'},
                                             {"dumpout", required_argument, nullptr, 'o'},
                                             {"verify", required_argument, nullptr, 'v'},
                                             {"help", no_argument, nullptr, '?'},
                                             {nullptr, 0, nullptr, 0}};

class BatchnormFwdArgParser
{
    private:
    int option_index = 0;

    public:
    std::vector<size_t> inLengths;
    std::vector<int> reduceDims;

    bool do_verification = false;
    bool do_dumpout      = false;

    bool updateMovingAverage;
    bool saveMeanAndInvVariance;

    int data_type    = 0;
    int init_method  = 2;
    bool time_kernel = false;

    BatchnormFwdArgParser()  = default;
    ~BatchnormFwdArgParser() = default;

    void show_usage(const char* cmd)
    {
        // clang-format off
        std::cout << "Usage of " << cmd << std::endl;
        std::cout << "--inOutLengths or -D, comma separated list of input tensor dimension lengths, must have 4 integers for nhwc" << std::endl;
        std::cout << "--reduceDims or -R, comma separated list of dimensions to reduce on" << std::endl;
        std::cout << "--verify or -v, 1/0 to indicate whether to verify the result by comparing with the host-based batch-normalization" << std::endl;
        std::cout << "Arg1: data type (0: fp16, 1: fp32, 3: int8, 5: bf16, 6: fp64)" << std::endl;
        std::cout << "Arg2: 1/0 to indicate whether to update the moving average and variance (0=no, 1=yes)" << std::endl;
        std::cout << "Arg3: 1/0 to indicate whether to save the calculated mean and invVariance (0=no, 1=yes)" << std::endl;
        std::cout << "Arg4: init method used for bnScale and bnBias (0=no init, 1=single integer value, 2=scope integer value, 3=decimal value)" << std::endl;
        std::cout << "Arg5: time kernel (0=no, 1=yes)" << std::endl;
        // clang-format on
    };
    int operator()(int argc, char* argv[])
    {
        using ck::host_common::getTypeValuesFromString;

        int ch;

        optind++; // to skip the module name

        while(1)
        {
            ch = getopt_long(argc, argv, "D:R:v:o:", long_options, &option_index);
            if(ch == -1)
                break;

            switch(ch)
            {
            case 'D':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                inLengths = getTypeValuesFromString<size_t>(optarg);
                break;
            case 'R':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                reduceDims = getTypeValuesFromString<int>(optarg);
                break;
            case 'v':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                do_verification = static_cast<bool>(std::atoi(optarg));
                break;
            case 'o':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                do_dumpout = static_cast<bool>(std::atoi(optarg));
                break;
            case '?':
                if(std::string(long_options[option_index].name) == "help")
                {
                    show_usage(argv[0]);
                    return -1;
                };
                break;
            default:
                show_usage(argv[0]);
                std::cerr << "Invalid cmd-line options!" << std::endl;
                return -1;
            };
        };

        if(optind + 5 > argc)
            throw std::runtime_error("Invalid cmd-line arguments, more arguments are needed!");

        data_type              = std::atoi(argv[optind++]);
        updateMovingAverage    = std::atoi(argv[optind++]);
        saveMeanAndInvVariance = std::atoi(argv[optind++]);
        init_method            = std::atoi(argv[optind++]);
        time_kernel            = static_cast<bool>(std::atoi(argv[optind++]));

        if(data_type != 0 && data_type != 1 && data_type != 3 && data_type != 5 && data_type != 6)
            return -1;

        return 0;
    };
}; // end of class BatchnormFwdArgParser
static const double epsilon       = std::numeric_limits<float>::epsilon();
static const double averageFactor = 0.1;

int profile_batchnorm_forward(int argc, char* argv[])
{
    using ck::profiler::profile_batchnorm_forward_impl;

    BatchnormFwdArgParser arg_parser;

    if(arg_parser(argc, argv) != 0)
        return -1;

    using F16  = ck::half_t;
    using F32  = float;
    using BF16 = ck::bhalf_t;
    using I8   = int8_t;
    using F64  = double;

    if(arg_parser.data_type == 0)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_forward_impl<F16, F16, F32, F16, F16, F16, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                arg_parser.updateMovingAverage,
                arg_parser.saveMeanAndInvVariance,
                epsilon,
                averageFactor);
        };
    }
    else if(arg_parser.data_type == 1)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_forward_impl<F32, F32, F32, F32, F32, F32, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                arg_parser.updateMovingAverage,
                arg_parser.saveMeanAndInvVariance,
                epsilon,
                averageFactor);
        };
    }
    else if(arg_parser.data_type == 3)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_forward_impl<I8, I8, F32, I8, I8, F32, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                arg_parser.updateMovingAverage,
                arg_parser.saveMeanAndInvVariance,
                epsilon,
                averageFactor);
        };
    }
    else if(arg_parser.data_type == 5)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_forward_impl<BF16, BF16, F32, BF16, BF16, F32, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                arg_parser.updateMovingAverage,
                arg_parser.saveMeanAndInvVariance,
                epsilon,
                averageFactor);
        };
    }
    else if(arg_parser.data_type == 6)
    {
        if(arg_parser.inLengths.size() == 4 && arg_parser.reduceDims.size() == 3)
        {
            profile_batchnorm_forward_impl<F64, F64, F64, F64, F64, F64, 4, 3>(
                arg_parser.do_verification,
                arg_parser.init_method,
                arg_parser.do_dumpout,
                arg_parser.time_kernel,
                arg_parser.inLengths,
                arg_parser.reduceDims,
                arg_parser.updateMovingAverage,
                arg_parser.saveMeanAndInvVariance,
                epsilon,
                averageFactor);
        };
    }

    return 0;
}
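Editor's note: the positional arguments parsed above follow the options in the order data_type, updateMovingAverage, saveMeanAndInvVariance, init_method, time_kernel, so an invocation of the new mode would look roughly like "bnorm_fwd -D 128,16,6,512 -R 0,1,2 -v 1 1 1 1 2 1" (the profiler binary name and exact shell syntax are assumptions, not part of this diff). The sketch below shows the equivalent direct call into the implementation header for the fp32 path (data_type == 1); the wrapper function name run_bnorm_fwd_example is hypothetical.

    #include <limits>
    #include <vector>

    #include "profiler/include/profile_batchnorm_forward_impl.hpp"

    // Editor's sketch: mirrors what profile_batchnorm_forward() above does for data_type == 1.
    int run_bnorm_fwd_example()
    {
        const double epsilon       = std::numeric_limits<float>::epsilon();
        const double averageFactor = 0.1;

        const std::vector<std::size_t> inOutLengths = {128, 16, 6, 512}; // NHWC
        const std::vector<int> reduceDims           = {0, 1, 2};         // reduce over N, H, W

        bool pass = ck::profiler::
            profile_batchnorm_forward_impl<float, float, float, float, float, float, 4, 3>(
                /*do_verification=*/1,
                /*init_method=*/2,
                /*do_dumpout=*/false,
                /*time_kernel=*/true,
                inOutLengths,
                reduceDims,
                /*updateMovingAverage=*/true,
                /*saveMeanAndInvVariance=*/true,
                epsilon,
                averageFactor);

        return pass ? 0 : -1;
    }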
profiler/src/profiler.cpp

@@ -24,6 +24,7 @@ int profile_softmax(int, char*[]);
 int profile_layernorm(int, char*[]);
 int profile_groupnorm(int, char*[]);
 int profile_reduce(int, char*[]);
+int profile_batchnorm_forward(int, char*[]);
 
 static void print_helper_message()
 {
@@ -46,7 +47,8 @@ static void print_helper_message()
         " grouped_conv_fwd: Grouped Convolution Forward\n"
         " grouped_conv_bwd_weight: Grouped Convolution Backward Weight\n"
         " softmax: Softmax\n"
-        " reduce: Reduce\n");
+        " reduce: Reduce\n"
+        " bnorm_fwd: Batchnorm forward\n");
     // clang-format on
 }
@@ -142,6 +144,10 @@ int main(int argc, char* argv[])
     {
         return profile_groupnorm(argc, argv);
     }
+    else if(strcmp(argv[1], "bnorm_fwd") == 0)
+    {
+        return profile_batchnorm_forward(argc, argv);
+    }
     else
     {
         print_helper_message();
test/CMakeLists.txt

@@ -53,3 +53,4 @@ add_subdirectory(softmax)
 add_subdirectory(normalization)
 add_subdirectory(data_type)
 add_subdirectory(elementwise_normalization)
+add_subdirectory(batchnorm_fwd)
test/batchnorm_fwd/CMakeLists.txt  (new file, mode 0 → 100644)

add_gtest_executable(test_batchnorm_fwd_rank_4 batchnorm_fwd_rank_4.cpp)
target_link_libraries(test_batchnorm_fwd_rank_4 PRIVATE utility device_batchnorm_instance)
test/batchnorm_fwd/batchnorm_fwd_rank_4.cpp  (new file, mode 0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include <cstdlib>
#include <iostream>
#include <initializer_list>
#include <vector>
#include <tuple>

#include <gtest/gtest.h>

#include "profiler/include/profile_batchnorm_forward_impl.hpp"

using F16  = ck::half_t;
using F32  = float;
using BF16 = ck::bhalf_t;
using I8   = int8_t;
using F64  = double;

template <typename Tuple>
class TestBatchNormFwdRank4 : public ::testing::Test
{
    private:
    const double epsilon       = std::numeric_limits<float>::epsilon();
    const double averageFactor = 0.1;

    protected:
    using XDataType       = std::tuple_element_t<0, Tuple>;
    using YDataType       = std::tuple_element_t<1, Tuple>;
    using AccDataType     = std::tuple_element_t<2, Tuple>;
    using ScaleDataType   = std::tuple_element_t<3, Tuple>;
    using BiasDataType    = std::tuple_element_t<4, Tuple>;
    using MeanVarDataType = std::tuple_element_t<5, Tuple>;

    std::vector<std::vector<size_t>> list_of_lengths = {{128, 16, 3, 1024},
                                                        {128, 16, 6, 512},
                                                        {1, 1, 1, 1},
                                                        {4, 4, 4, 4},
                                                        {32, 32, 32, 32}};

    std::vector<int> reduceDims;

    template <int NumReduceDim>
    void Run()
    {
        for(auto& inOutLengths : list_of_lengths)
        {
            bool pass = true;

            EXPECT_FALSE(reduceDims.size() != NumReduceDim);

            pass = pass &&
                   ck::profiler::profile_batchnorm_forward_impl<XDataType,
                                                                YDataType,
                                                                AccDataType,
                                                                ScaleDataType,
                                                                BiasDataType,
                                                                MeanVarDataType,
                                                                4,
                                                                NumReduceDim>(
                       true, 3, false, false, inOutLengths, reduceDims, true, true, epsilon, averageFactor);

            pass = pass &&
                   ck::profiler::profile_batchnorm_forward_impl<XDataType,
                                                                YDataType,
                                                                AccDataType,
                                                                ScaleDataType,
                                                                BiasDataType,
                                                                MeanVarDataType,
                                                                4,
                                                                NumReduceDim>(
                       true, 3, false, false, inOutLengths, reduceDims, false, false, epsilon, averageFactor);

            EXPECT_TRUE(pass);
        }
    }
};

using KernelTypes = ::testing::Types<std::tuple<F16, F16, F32, F16, F16, F32>,
                                     std::tuple<F32, F32, F32, F32, F32, F32>,
                                     std::tuple<BF16, BF16, F32, BF16, BF16, F32>,
                                     std::tuple<I8, I8, F32, I8, I8, F32>,
                                     std::tuple<F64, F64, F64, F64, F64, F64>>;

TYPED_TEST_SUITE(TestBatchNormFwdRank4, KernelTypes);

// nhwc
TYPED_TEST(TestBatchNormFwdRank4, nhwc)
{
    this->reduceDims = {0, 1, 2};
    this->template Run<3>();
}

// nchw
TYPED_TEST(TestBatchNormFwdRank4, nchw)
{
    this->reduceDims = {0, 2, 3};
    this->template Run<3>();
}
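Editor's note: one of the shapes exercised above, {128, 16, 6, 512} with reduceDims {0, 1, 2}, also makes the profiler's bandwidth estimate easy to check by hand. The arithmetic below is an editor's back-of-the-envelope sketch of the num_bytes formula from profile_batchnorm_forward_impl.hpp for the all-fp32 case with both updateMovingAverage and saveMeanAndInvVariance enabled; it is not part of the commit.

    #include <cstddef>
    #include <iostream>

    int main()
    {
        const std::size_t total_length     = 128UL * 16 * 6 * 512; // product of all dimensions
        const std::size_t invariant_length = 512;                  // product of non-reduced dimensions (C)

        std::size_t num_bytes = 0;
        num_bytes += total_length * (sizeof(float) + sizeof(float));     // read x, write y
        num_bytes += invariant_length * (sizeof(float) + sizeof(float)); // read scale and bias
        num_bytes += invariant_length * sizeof(float) * 2;               // saved mean and inv-variance
        num_bytes += invariant_length * sizeof(float) * 4;               // factor 4 used by the profiler for the running mean/variance update

        // The profiler reports num_bytes / 1.E6 / avg_time as GB/s, with avg_time in milliseconds.
        std::cout << "num_bytes = " << num_bytes << std::endl; // prints 50348032
        return 0;
    }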