"router/vscode:/vscode.git/clone" did not exist on "31e2253ae721ea80032283b9e85ffe51945e5a55"
Commit e862412f authored by Shucai Xiao

clang format

parent 1222d174
@@ -44,8 +44,8 @@ struct quant_convolution
         const shape& input = inputs.at(0);
         const shape& weights = inputs.at(1);
         auto t = input.type();
-        if (t != shape::int8_type)
+        if(t != shape::int8_type)
         {
             MIGRAPHX_THROW("QUANT_THROW: only accept input of type int8_t");
         }
......
@@ -920,11 +920,11 @@ struct cpu_apply
     void init()
     {
-        apply_map["im2col"] = extend_op<cpu_im2col, op::im2col>();
-        apply_map["convolution"] = extend_op<cpu_convolution, op::convolution>();
+        apply_map["im2col"] = extend_op<cpu_im2col, op::im2col>();
+        apply_map["convolution"] = extend_op<cpu_convolution, op::convolution>();
         apply_map["quant_convolution"] = extend_op<cpu_quant_convolution, op::quant_convolution>();
-        apply_map["dot"] = extend_op<cpu_gemm, op::dot>();
-        apply_map["quant_dot"] = extend_op<cpu_quant_gemm, op::quant_dot>();
+        apply_map["dot"] = extend_op<cpu_gemm, op::dot>();
+        apply_map["quant_dot"] = extend_op<cpu_quant_gemm, op::quant_dot>();
         apply_map["batch_norm_inference"] =
             extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
         apply_map["lrn"] = extend_op<cpu_lrn, op::lrn>();
......
@@ -57,7 +57,7 @@ inline tensor_descriptor make_tensor(const migraphx::shape& s)
     return t;
 }
-template<class T>
+template <class T>
 inline convolution_descriptor make_conv(const T& op)
 {
     auto c = make_obj<convolution_descriptor>(&miopenCreateConvolutionDescriptor);
......
@@ -12,8 +12,8 @@ shape miopen_quant_convolution::compute_shape(const std::vector<shape>& inputs)
     return op.compute_shape({inputs.at(0), inputs.at(1)});
 }
 argument miopen_quant_convolution::compute(context& ctx,
-                                           const shape& output_shape,
-                                           const std::vector<argument>& args) const
+                                           const shape& output_shape,
+                                           const std::vector<argument>& args) const
 {
     auto x_desc = make_tensor(args[0].get_shape());
     auto w_desc = make_tensor(args[1].get_shape());
@@ -21,7 +21,7 @@ argument miopen_quant_convolution::compute(context& ctx,
     int8_t alpha = 1;
     int8_t beta = 0;
-    auto status = miopenConvolutionForward(ctx.get_stream().get_miopen(),
+    auto status = miopenConvolutionForward(ctx.get_stream().get_miopen(),
                                            &alpha,
                                            x_desc.get(),
                                            args[0].implicit(),
@@ -39,8 +39,8 @@ argument miopen_quant_convolution::compute(context& ctx,
 }
 shape miopen_quant_convolution::compile(context& ctx,
-                                        const shape& output_shape,
-                                        std::vector<shape> inputs)
+                                        const shape& output_shape,
+                                        std::vector<shape> inputs)
 {
     shape workspace_shape{};
     auto x_desc = make_tensor(inputs[0]);
@@ -85,8 +85,8 @@ shape miopen_quant_convolution::compile(context& ctx,
 }
 void miopen_quant_convolution::finalize(context& ctx,
-                                        const shape& output_shape,
-                                        std::vector<shape> inputs)
+                                        const shape& output_shape,
+                                        std::vector<shape> inputs)
 {
     if(handle == ctx.get_stream().get_miopen())
         return;
......
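For context, the hunks above touch two patterns: cpu_apply registers quantized operators in a name-to-implementation map (apply_map), and quant_convolution rejects inputs that are not int8. The following is a minimal standalone sketch of that dispatch-and-validate pattern using plain std::unordered_map and std::function; the names (my_quant_convolution, tensor_shape, dtype) are hypothetical and this is not MIGraphX's actual extend_op machinery.

    // Illustrative sketch only: simplified name -> builder dispatch and an int8 guard,
    // mirroring the apply_map registration and shape::int8_type check in the diff above.
    #include <cstddef>
    #include <functional>
    #include <stdexcept>
    #include <string>
    #include <unordered_map>
    #include <vector>

    enum class dtype { int8, float32 };

    struct tensor_shape
    {
        dtype type;
        std::vector<std::size_t> lens;
    };

    // Hypothetical quantized convolution "op": only accepts int8 inputs.
    struct my_quant_convolution
    {
        tensor_shape compute_shape(const std::vector<tensor_shape>& inputs) const
        {
            if(inputs.at(0).type != dtype::int8)
                throw std::runtime_error("quant_convolution: only accept input of type int8_t");
            return inputs.at(0); // placeholder output shape
        }
    };

    int main()
    {
        // Name -> factory, analogous to apply_map["quant_convolution"] = extend_op<...>();
        std::unordered_map<std::string, std::function<my_quant_convolution()>> apply_map;
        apply_map["quant_convolution"] = [] { return my_quant_convolution{}; };

        auto op  = apply_map.at("quant_convolution")();
        auto out = op.compute_shape({tensor_shape{dtype::int8, {1, 3, 32, 32}}});
        (void)out;
        return 0;
    }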