Commit b30a17c3 authored by Shucai Xiao

clang format

parent d64ab071
@@ -20,8 +20,8 @@ namespace op {
 struct convert : unary<convert>
 {
     shape::type_t target_type = shape::half_type;
-    float scale = 1.0f;
-    float shift = 0.0f;
+    float scale = 1.0f;
+    float shift = 0.0f;
     template <class Self, class F>
     static auto reflect(Self& self, F f)
@@ -41,7 +41,7 @@ struct convert : unary<convert>
     }
     convert(shape::type_t t) : target_type{t} {}
-    convert(shape::type_t t, float sle, float sft) : target_type{t}, scale {sle}, shift{sft} {}
+    convert(shape::type_t t, float sle, float sft) : target_type{t}, scale{sle}, shift{sft} {}
     convert() {}
 };
@@ -215,29 +215,33 @@ void quantize_int8(program& prog, const std::vector<std::string>& ins_names)
         // used as scale and shift(.0f), which will generate results different from
         // the original results. To adjust the output to be "correct (approximately
         // equal)", we need additional calculation for the adjustment
-        if (ins->name() == "dot")
+        if(ins->name() == "dot")
         {
-            auto dot_op = any_cast<op::dot>(ins->get_operator());
-            int32_t quant_alpha = static_cast<int32_t>(dot_op.alpha / (int8_param[0].first * int8_param[1].first) + 0.5f);
+            auto dot_op = any_cast<op::dot>(ins->get_operator());
+            int32_t quant_alpha = static_cast<int32_t>(
+                dot_op.alpha / (int8_param[0].first * int8_param[1].first) + 0.5f);
             int32_t quant_beta = static_cast<int32_t>(dot_op.beta + 0.5f);
             prog.replace_instruction(ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
         }
-        else if (ins->name() == "convolution")
+        else if(ins->name() == "convolution")
         {
-            // Current MIOpen convolution does not support alpha and beta,
+            // Current MIOpen convolution does not support alpha and beta,
             // so we need a separate multiply to adjust the output
-            auto conv_op = any_cast<op::convolution>(ins->get_operator());
-            auto padding = conv_op.padding;
-            auto stride = conv_op.stride;
-            auto dilation = conv_op.dilation;
-            auto padding_mode = conv_op.padding_mode;
-            auto group = conv_op.group;
+            auto conv_op = any_cast<op::convolution>(ins->get_operator());
+            auto padding = conv_op.padding;
+            auto stride = conv_op.stride;
+            auto dilation = conv_op.dilation;
+            auto padding_mode = conv_op.padding_mode;
+            auto group = conv_op.group;
             auto adjust_factor = 1.0 / int8_param[0].first * int8_param[1].first;
-            auto conv_res = prog.insert_instruction(ins, op::quant_convolution{padding, stride, dilation, padding_mode, group}, converted_inputs);
+            auto conv_res = prog.insert_instruction(
+                ins,
+                op::quant_convolution{padding, stride, dilation, padding_mode, group},
+                converted_inputs);
             auto conv_lens = conv_res->get_shape().lens();
-            auto fl = prog.add_literal(literal(adjust_factor));
-            auto adj_fact = prog.insert_instruction(ins, op::multibroadcast{conv_lens}, fl);
+            auto fl = prog.add_literal(literal(adjust_factor));
+            auto adj_fact = prog.insert_instruction(ins, op::multibroadcast{conv_lens}, fl);
             prog.replace_instruction(ins, adj_fact);
         }
         else
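
For reference, a minimal standalone sketch of the scale arithmetic in this hunk, assuming int8_param holds one (scale, shift) pair per quantized input. The names scale_a/scale_b and the sample values are illustrative only and are not taken from the commit; this mirrors the arithmetic, not the MIGraphX API.

// Sketch only: reproduces the quant_alpha / quant_beta / adjust_factor
// computations above with hypothetical per-input int8 scales.
#include <cstdint>
#include <iostream>

int main()
{
    float alpha   = 1.0f;  // original dot alpha
    float beta    = 0.0f;  // original dot beta
    float scale_a = 0.05f; // assumed int8 scale for the first input
    float scale_b = 0.1f;  // assumed int8 scale for the second input

    // quant_dot runs on int8 data, so alpha is divided by the product of the
    // input scales; adding 0.5f before the cast rounds to the nearest integer.
    int32_t quant_alpha =
        static_cast<int32_t>(alpha / (scale_a * scale_b) + 0.5f); // 200 here
    int32_t quant_beta = static_cast<int32_t>(beta + 0.5f);       // 0 here

    // quant_convolution exposes no alpha/beta, so the hunk rescales its
    // int32 output with a separate broadcast multiply by this factor.
    double adjust_factor = 1.0 / scale_a * scale_b; // 2.0 here

    std::cout << "quant_alpha=" << quant_alpha << " quant_beta=" << quant_beta
              << " adjust_factor=" << adjust_factor << '\n';
}

In other words, the dot path folds the rescaling into quant_dot's integer alpha, while the convolution path applies it afterwards through multibroadcast plus a multiply, because the MIOpen convolution used here does not support alpha and beta.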