Commit 8e8ae66d authored by Chao Liu

clean

parent 0e81cc18
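This commit renames ck::tensor_operation::device::ConvParams to ck::utils::conv::ConvParam, moving it from the ck::tensor_operation::device namespace into ck::utils::conv, and updates the examples and profiler helpers accordingly (several local `params` variables become `conv_param`). As a minimal sketch of caller code after the rename, assuming the struct is still declared in the convolution parameter utility header (the include path is not shown in this diff and is an assumption):

```cpp
// Sketch only: the include path below is assumed, not confirmed by this diff.
#include <iostream>

#include "ck/library/utility/convolution_parameter.hpp" // assumed header for ck::utils::conv::ConvParam

int main()
{
    // Arguments follow the ConvParam constructor shown in the diff:
    // num_dim_spatial, N, K, C, filter lengths, input lengths,
    // strides, dilations, left pads, right pads.
    ck::utils::conv::ConvParam conv_param{
        2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};

    // operator<< is declared for the renamed struct, so it can be printed directly.
    std::cout << conv_param << std::endl;

    return 0;
}
```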
@@ -101,8 +101,7 @@ void print_helper_msg()
 << std::endl;
 }
-ck::tensor_operation::device::ConvParams
-parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
+ck::utils::conv::ConvParam parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 {
 const ck::index_t N = std::stoi(argv[arg_idx++]);
 const ck::index_t K = std::stoi(argv[arg_idx++]);
@@ -145,16 +144,16 @@ parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 input_right_pads[i] = std::stoi(argv[arg_idx++]);
 }
-return ck::tensor_operation::device::ConvParams{num_dim_spatial,
-N,
-K,
-C,
-filter_spatial_lengths,
-input_spatial_lengths,
-conv_filter_strides,
-conv_filter_dilations,
-input_left_pads,
-input_right_pads};
+return ck::utils::conv::ConvParam{num_dim_spatial,
+N,
+K,
+C,
+filter_spatial_lengths,
+input_spatial_lengths,
+conv_filter_strides,
+conv_filter_dilations,
+input_left_pads,
+input_right_pads};
 }
 } // namespace
@@ -168,7 +167,7 @@ int main(int argc, char* argv[])
 bool time_kernel = false;
 int num_dim_spatial = 2;
-ck::tensor_operation::device::ConvParams params{
+ck::utils::conv::ConvParam params{
 2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
 if(argc == 1)
......
@@ -98,8 +98,7 @@ void print_helper_msg()
 << std::endl;
 }
-ck::tensor_operation::device::ConvParams
-parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
+ck::utils::conv::ConvParam parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 {
 const ck::index_t N = std::stoi(argv[arg_idx++]);
 const ck::index_t K = std::stoi(argv[arg_idx++]);
@@ -142,16 +141,16 @@ parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 input_right_pads[i] = std::stoi(argv[arg_idx++]);
 }
-return ck::tensor_operation::device::ConvParams{num_dim_spatial,
-N,
-K,
-C,
-filter_spatial_lengths,
-input_spatial_lengths,
-conv_filter_strides,
-conv_filter_dilations,
-input_left_pads,
-input_right_pads};
+return ck::utils::conv::ConvParam{num_dim_spatial,
+N,
+K,
+C,
+filter_spatial_lengths,
+input_spatial_lengths,
+conv_filter_strides,
+conv_filter_dilations,
+input_left_pads,
+input_right_pads};
 }
 } // anonymous namespace
@@ -165,7 +164,7 @@ int main(int argc, char* argv[])
 bool time_kernel = false;
 int num_dim_spatial = 2;
-ck::tensor_operation::device::ConvParams params{
+ck::utils::conv::ConvParam params{
 2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
 if(argc == 1)
......
@@ -35,8 +35,7 @@ void print_helper_msg()
 << std::endl;
 }
-ck::tensor_operation::device::ConvParams
-parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
+ck::utils::conv::ConvParam parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 {
 const ck::index_t N = std::stoi(argv[arg_idx++]);
 const ck::index_t K = std::stoi(argv[arg_idx++]);
@@ -79,16 +78,16 @@ parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 input_right_pads[i] = std::stoi(argv[arg_idx++]);
 }
-return ck::tensor_operation::device::ConvParams{num_dim_spatial,
-N,
-K,
-C,
-filter_spatial_lengths,
-input_spatial_lengths,
-conv_filter_strides,
-conv_filter_dilations,
-input_left_pads,
-input_right_pads};
+return ck::utils::conv::ConvParam{num_dim_spatial,
+N,
+K,
+C,
+filter_spatial_lengths,
+input_spatial_lengths,
+conv_filter_strides,
+conv_filter_dilations,
+input_left_pads,
+input_right_pads};
 }
 // FIXME: current implementation only support NCHW/NHWC layout
@@ -106,7 +105,7 @@ template <ck::index_t NDimSpatial,
 int run_conv_fwd(bool do_verification,
 int init_method,
 bool time_kernel,
-const ck::tensor_operation::device::ConvParams& params,
+const ck::utils::conv::ConvParam& conv_param,
 const InElementOp& in_element_op,
 const WeiElementOp& wei_element_op,
 const OutElementOp& out_element_op)
@@ -149,16 +148,16 @@ int run_conv_fwd(bool do_verification,
 auto argument = conv.MakeArgument(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
 static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
 static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
-params.N_,
-params.K_,
-params.C_,
-params.input_spatial_lengths_,
-params.filter_spatial_lengths_,
-params.GetOutputSpatialLengths(),
-params.conv_filter_strides_,
-params.conv_filter_dilations_,
-params.input_left_pads_,
-params.input_right_pads_,
+conv_param.N_,
+conv_param.K_,
+conv_param.C_,
+conv_param.input_spatial_lengths_,
+conv_param.filter_spatial_lengths_,
+conv_param.GetOutputSpatialLengths(),
+conv_param.conv_filter_strides_,
+conv_param.conv_filter_dilations_,
+conv_param.input_left_pads_,
+conv_param.input_right_pads_,
 in_element_op,
 wei_element_op,
 out_element_op);
@@ -172,8 +171,8 @@ int run_conv_fwd(bool do_verification,
 float avg_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});
-std::size_t flop = params.GetFlops();
-std::size_t num_btype = params.GetByte<InDataType, WeiDataType, OutDataType>();
+std::size_t flop = conv_param.GetFlops();
+std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();
 float tflops = static_cast<float>(flop) / 1.E9 / avg_time;
 float gb_per_sec = num_btype / 1.E6 / avg_time;
@@ -197,10 +196,10 @@ int run_conv_fwd(bool do_verification,
 auto ref_argument = ref_conv.MakeArgument(in,
 wei,
 out_host,
-params.conv_filter_strides_,
-params.conv_filter_dilations_,
-params.input_left_pads_,
-params.input_right_pads_,
+conv_param.conv_filter_strides_,
+conv_param.conv_filter_dilations_,
+conv_param.input_left_pads_,
+conv_param.input_right_pads_,
 in_element_op,
 wei_element_op,
 out_element_op);
......
@@ -66,7 +66,7 @@ int main(int argc, char* argv[])
 bool time_kernel = false;
 int num_dim_spatial = 2;
-ck::tensor_operation::device::ConvParams params{
+ck::utils::conv::ConvParam params{
 2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
 if(argc == 1)
......
@@ -66,7 +66,7 @@ int main(int argc, char* argv[])
 bool time_kernel = false;
 int num_dim_spatial = 2;
-ck::tensor_operation::device::ConvParams params{
+ck::utils::conv::ConvParam params{
 2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
 if(argc == 1)
......
@@ -66,7 +66,7 @@ int main(int argc, char* argv[])
 bool time_kernel = false;
 int num_dim_spatial = 2;
-ck::tensor_operation::device::ConvParams params{
+ck::utils::conv::ConvParam params{
 2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
 if(argc == 1)
......
@@ -66,7 +66,7 @@ int main(int argc, char* argv[])
 bool time_kernel = false;
 int num_dim_spatial = 2;
-ck::tensor_operation::device::ConvParams params{
+ck::utils::conv::ConvParam params{
 2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
 if(argc == 1)
......
@@ -66,7 +66,7 @@ int main(int argc, char* argv[])
 bool time_kernel = false;
 int num_dim_spatial = 2;
-ck::tensor_operation::device::ConvParams params{
+ck::utils::conv::ConvParam params{
 2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
 if(argc == 1)
......
@@ -18,8 +18,7 @@
 #include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
 #include "ck/library/reference_tensor_operation/cpu/reference_conv_bwd_data.hpp"
-ck::tensor_operation::device::ConvParams
-parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
+ck::utils::conv::ConvParam parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 {
 const ck::index_t N = std::stoi(argv[arg_idx++]);
 const ck::index_t K = std::stoi(argv[arg_idx++]);
@@ -62,16 +61,16 @@ parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 input_right_pads[i] = std::stoi(argv[arg_idx++]);
 }
-return ck::tensor_operation::device::ConvParams{num_dim_spatial,
-N,
-K,
-C,
-filter_spatial_lengths,
-input_spatial_lengths,
-conv_filter_strides,
-conv_filter_dilations,
-input_left_pads,
-input_right_pads};
+return ck::utils::conv::ConvParam{num_dim_spatial,
+N,
+K,
+C,
+filter_spatial_lengths,
+input_spatial_lengths,
+conv_filter_strides,
+conv_filter_dilations,
+input_left_pads,
+input_right_pads};
 }
 void print_helper_msg()
@@ -106,7 +105,7 @@ template <ck::index_t NDimSpatial,
 int run_conv_bwd_data(bool do_verification,
 int init_method,
 bool time_kernel,
-const ck::tensor_operation::device::ConvParams& conv_param,
+const ck::utils::conv::ConvParam& conv_param,
 const InElementOp& in_element_op,
 const WeiElementOp& wei_element_op,
 const OutElementOp& out_element_op)
......
@@ -66,7 +66,7 @@ int main(int argc, char* argv[])
 bool time_kernel = false;
 int num_dim_spatial = 2;
-ck::tensor_operation::device::ConvParams params{
+ck::utils::conv::ConvParam params{
 2, 128, 256, 256, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
 if(argc == 1)
......
@@ -35,8 +35,7 @@ void print_helper_msg()
 << "split_k" << std::endl;
 }
-ck::tensor_operation::device::ConvParams
-parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
+ck::utils::conv::ConvParam parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 {
 const ck::index_t N = std::stoi(argv[arg_idx++]);
 const ck::index_t K = std::stoi(argv[arg_idx++]);
@@ -79,16 +78,16 @@ parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 input_right_pads[i] = std::stoi(argv[arg_idx++]);
 }
-return ck::tensor_operation::device::ConvParams{num_dim_spatial,
-N,
-K,
-C,
-filter_spatial_lengths,
-input_spatial_lengths,
-conv_filter_strides,
-conv_filter_dilations,
-input_left_pads,
-input_right_pads};
+return ck::utils::conv::ConvParam{num_dim_spatial,
+N,
+K,
+C,
+filter_spatial_lengths,
+input_spatial_lengths,
+conv_filter_strides,
+conv_filter_dilations,
+input_left_pads,
+input_right_pads};
 }
 // FIXME: current implementation only support NCHW/NHWC layout
@@ -106,7 +105,7 @@ template <ck::index_t NDimSpatial,
 int run_conv_bwd_weight(bool do_verification,
 int init_method,
 bool time_kernel,
-const ck::tensor_operation::device::ConvParams& params,
+const ck::utils::conv::ConvParam& conv_param,
 const InElementOp& in_element_op,
 const WeiElementOp& wei_element_op,
 const OutElementOp& out_element_op,
@@ -153,16 +152,16 @@ int run_conv_bwd_weight(bool do_verification,
 auto argument = conv.MakeArgument(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
 static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
 static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
-params.N_,
-params.K_,
-params.C_,
-params.input_spatial_lengths_,
-params.filter_spatial_lengths_,
-params.output_spatial_lengths_,
-params.conv_filter_strides_,
-params.conv_filter_dilations_,
-params.input_left_pads_,
-params.input_right_pads_,
+conv_param.N_,
+conv_param.K_,
+conv_param.C_,
+conv_param.input_spatial_lengths_,
+conv_param.filter_spatial_lengths_,
+conv_param.output_spatial_lengths_,
+conv_param.conv_filter_strides_,
+conv_param.conv_filter_dilations_,
+conv_param.input_left_pads_,
+conv_param.input_right_pads_,
 in_element_op,
 wei_element_op,
 out_element_op,
@@ -178,8 +177,8 @@ int run_conv_bwd_weight(bool do_verification,
 float avg_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});
-std::size_t flop = params.GetFlops();
-std::size_t num_btype = params.GetByte<InDataType, WeiDataType, OutDataType>();
+std::size_t flop = conv_param.GetFlops();
+std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();
 float tflops = static_cast<float>(flop) / 1.E9 / avg_time;
@@ -206,10 +205,10 @@ int run_conv_bwd_weight(bool do_verification,
 auto ref_argument = ref_conv.MakeArgument(in,
 wei_host_result,
 out,
-params.conv_filter_strides_,
-params.conv_filter_dilations_,
-params.input_left_pads_,
-params.input_right_pads_,
+conv_param.conv_filter_strides_,
+conv_param.conv_filter_dilations_,
+conv_param.input_left_pads_,
+conv_param.input_right_pads_,
 InElementOp{},
 WeiElementOp{},
 OutElementOp{});
......
@@ -70,7 +70,7 @@ int main(int argc, char* argv[])
 bool time_kernel = false;
 int num_dim_spatial = 2;
-ck::tensor_operation::device::ConvParams params{
+ck::utils::conv::ConvParam params{
 2, 32, 256, 1024, {3, 3}, {14, 14}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
 ck::index_t split_k = 4;
......
@@ -69,7 +69,7 @@ int main(int argc, char* argv[])
 bool time_kernel = false;
 int num_dim_spatial = 2;
-ck::tensor_operation::device::ConvParams params{
+ck::utils::conv::ConvParam params{
 2, 32, 256, 1024, {3, 3}, {14, 14}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};
 ck::index_t split_k = 4;
......
@@ -12,8 +12,7 @@ namespace utils {
 namespace conv {
 template <typename InLayout>
-HostTensorDescriptor
-get_input_host_tensor_descriptor(const ck::tensor_operation::device::ConvParams& param)
+HostTensorDescriptor get_input_host_tensor_descriptor(const ck::utils::conv::ConvParam& param)
 {
 if constexpr(ck::is_same_v<InLayout, ck::tensor_layout::convolution::NWC> ||
 ck::is_same_v<InLayout, ck::tensor_layout::convolution::NHWC> ||
@@ -48,8 +47,7 @@ get_input_host_tensor_descriptor(const ck::tensor_operation::device::ConvParams&
 }
 template <typename WeiLayout>
-HostTensorDescriptor
-get_weight_host_tensor_descriptor(const ck::tensor_operation::device::ConvParams& param)
+HostTensorDescriptor get_weight_host_tensor_descriptor(const ck::utils::conv::ConvParam& param)
 {
 if constexpr(ck::is_same_v<WeiLayout, ck::tensor_layout::convolution::KXC> ||
 ck::is_same_v<WeiLayout, ck::tensor_layout::convolution::KYXC> ||
@@ -84,8 +82,7 @@ get_weight_host_tensor_descriptor(const ck::tensor_operation::device::ConvParams&
 }
 template <typename OutLayout>
-HostTensorDescriptor
-get_output_host_tensor_descriptor(const ck::tensor_operation::device::ConvParams& param)
+HostTensorDescriptor get_output_host_tensor_descriptor(const ck::utils::conv::ConvParam& param)
 {
 if constexpr(ck::is_same_v<OutLayout, ck::tensor_layout::convolution::NWK> ||
 ck::is_same_v<OutLayout, ck::tensor_layout::convolution::NHWK> ||
......
@@ -11,22 +11,22 @@
 #include "ck/ck.hpp"
 namespace ck {
-namespace tensor_operation {
-namespace device {
+namespace utils {
+namespace conv {
-struct ConvParams
+struct ConvParam
 {
-ConvParams();
-ConvParams(ck::index_t n_dim,
-ck::index_t n_batch,
-ck::index_t n_out_channels,
-ck::index_t n_in_channels,
-const std::vector<ck::index_t>& filters_len,
-const std::vector<ck::index_t>& input_len,
-const std::vector<ck::index_t>& strides,
-const std::vector<ck::index_t>& dilations,
-const std::vector<ck::index_t>& left_pads,
-const std::vector<ck::index_t>& right_pads);
+ConvParam();
+ConvParam(ck::index_t n_dim,
+ck::index_t n_batch,
+ck::index_t n_out_channels,
+ck::index_t n_in_channels,
+const std::vector<ck::index_t>& filters_len,
+const std::vector<ck::index_t>& input_len,
+const std::vector<ck::index_t>& strides,
+const std::vector<ck::index_t>& dilations,
+const std::vector<ck::index_t>& left_pads,
+const std::vector<ck::index_t>& right_pads);
 ck::index_t num_dim_spatial_;
 ck::index_t N_;
@@ -71,8 +71,8 @@ struct ConvParams
 }
 };
-} // namespace device
-} // namespace tensor_operation
+} // namespace conv
+} // namespace utils
 } // namespace ck
-std::ostream& operator<<(std::ostream& os, const ck::tensor_operation::device::ConvParams& p);
+std::ostream& operator<<(std::ostream& os, const ck::utils::conv::ConvParam& p);
@@ -5,19 +5,19 @@
 #include "ck/library/utility/io.hpp"
 namespace ck {
-namespace tensor_operation {
-namespace device {
+namespace utils {
+namespace conv {
-ConvParams::ConvParams(ck::index_t n_dim,
-ck::index_t n_batch,
-ck::index_t n_out_channels,
-ck::index_t n_in_channels,
-const std::vector<ck::index_t>& filters_len,
-const std::vector<ck::index_t>& input_len,
-const std::vector<ck::index_t>& strides,
-const std::vector<ck::index_t>& dilations,
-const std::vector<ck::index_t>& left_pads,
-const std::vector<ck::index_t>& right_pads)
+ConvParam::ConvParam(ck::index_t n_dim,
+ck::index_t n_batch,
+ck::index_t n_out_channels,
+ck::index_t n_in_channels,
+const std::vector<ck::index_t>& filters_len,
+const std::vector<ck::index_t>& input_len,
+const std::vector<ck::index_t>& strides,
+const std::vector<ck::index_t>& dilations,
+const std::vector<ck::index_t>& left_pads,
+const std::vector<ck::index_t>& right_pads)
 : num_dim_spatial_(n_dim),
 N_(n_batch),
 K_(n_out_channels),
@@ -38,7 +38,7 @@ ConvParams::ConvParams(ck::index_t n_dim,
 static_cast<ck::index_t>(input_right_pads_.size()) != num_dim_spatial_)
 {
 throw(
-std::runtime_error("ConvParams::ConvParams: "
+std::runtime_error("ConvParam::ConvParam: "
 "parameter size is different from number of declared dimensions!"));
 }
@@ -55,17 +55,17 @@ ConvParams::ConvParams(ck::index_t n_dim,
 }
 }
-ConvParams::ConvParams()
-: ConvParams::ConvParams(2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1})
+ConvParam::ConvParam()
+: ConvParam::ConvParam(2, 128, 256, 192, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1})
 {
 }
-std::vector<ck::index_t> ConvParams::GetOutputSpatialLengths() const
+std::vector<ck::index_t> ConvParam::GetOutputSpatialLengths() const
 {
 return output_spatial_lengths_;
 }
-std::size_t ConvParams::GetFlops() const
+std::size_t ConvParam::GetFlops() const
 {
 // 2 * N * K * C * <output spatial lengths product> * <filter spatial lengths product>
 return static_cast<std::size_t>(2) * N_ * K_ * C_ *
@@ -79,13 +79,13 @@ std::size_t ConvParams::GetFlops() const
 std::multiplies<std::size_t>());
 }
-} // namespace device
-} // namespace tensor_operation
+} // namespace conv
+} // namespace utils
 } // namespace ck
-std::ostream& operator<<(std::ostream& os, const ck::tensor_operation::device::ConvParams& p)
+std::ostream& operator<<(std::ostream& os, const ck::utils::conv::ConvParam& p)
 {
-os << "ConvParams {"
+os << "ConvParam {"
 << "\nnum_dim_spatial: " << p.num_dim_spatial_ << "\nN: " << p.N_ << "\nK: " << p.K_
 << "\nC: " << p.C_ << "\nfilter_spatial_lengths: " << p.filter_spatial_lengths_
 << "\ninput_spatial_lengths: " << p.input_spatial_lengths_
......
@@ -58,7 +58,7 @@ bool profile_conv_bwd_data_impl(int do_verification,
 int init_method,
 bool do_log,
 bool time_kernel,
-const ck::tensor_operation::device::ConvParams& conv_param)
+const ck::utils::conv::ConvParam& conv_param)
 {
 using InElementOp = ck::tensor_operation::element_wise::PassThrough;
 using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
......
@@ -63,7 +63,7 @@ bool profile_conv_bwd_weight_impl(int do_verification,
 int init_method,
 bool do_log,
 bool time_kernel,
-const ck::tensor_operation::device::ConvParams& conv_param,
+const ck::utils::conv::ConvParam& conv_param,
 ck::index_t split_k)
 {
 using InElementOp = ck::tensor_operation::element_wise::PassThrough;
......
@@ -25,7 +25,6 @@
 namespace ck {
 namespace profiler {
-// FIXME: only support NCHW and NHWC layout, need to be more general
 template <ck::index_t NDimSpatial,
 typename InLayout,
 typename WeiLayout,
@@ -37,7 +36,7 @@ bool profile_conv_fwd_impl(int do_verification,
 int init_method,
 bool do_log,
 bool time_kernel,
-const ck::tensor_operation::device::ConvParams& conv_param)
+const ck::utils::conv::ConvParam& conv_param)
 {
 const auto in_desc = ck::utils::conv::get_input_host_tensor_descriptor<InLayout>(conv_param);
 const auto wei_desc = ck::utils::conv::get_weight_host_tensor_descriptor<WeiLayout>(conv_param);
......
@@ -51,8 +51,7 @@ static void print_helper_msg()
 << std::endl;
 }
-ck::tensor_operation::device::ConvParams
-parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
+ck::utils::conv::ConvParam parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 {
 const ck::index_t N = std::stoi(argv[arg_idx++]);
 const ck::index_t K = std::stoi(argv[arg_idx++]);
@@ -95,16 +94,16 @@ parse_conv_params(int num_dim_spatial, int arg_idx, char* const argv[])
 input_right_pads[i] = std::stoi(argv[arg_idx++]);
 }
-return ck::tensor_operation::device::ConvParams{num_dim_spatial,
-N,
-K,
-C,
-filter_spatial_lengths,
-input_spatial_lengths,
-conv_filter_strides,
-conv_filter_dilations,
-input_left_pads,
-input_right_pads};
+return ck::utils::conv::ConvParam{num_dim_spatial,
+N,
+K,
+C,
+filter_spatial_lengths,
+input_spatial_lengths,
+conv_filter_strides,
+conv_filter_dilations,
+input_left_pads,
+input_right_pads};
 }
 } // namespace
......