Commit fd523fd1 authored by Shucai Xiao's avatar Shucai Xiao
Browse files

fixed a bug in the implementation

parent 3b1dc977
......@@ -105,7 +105,7 @@ void migemm_impl(tensor_view<T> cmat, tensor_view<T> amat, tensor_view<T> bmat,
}
template <class F>
void migemm(const argument& c_arg, const argument& a_arg, const argument& b_arg, F alpha, F beta)
void migemm_tpl(const argument& c_arg, const argument& a_arg, const argument& b_arg, F alpha, F beta)
{
visit_all(c_arg, a_arg, b_arg)(
[&](auto cmat, auto amat, auto bmat) { migemm_impl(cmat, amat, bmat, alpha, beta); });
......@@ -114,13 +114,13 @@ void migemm(const argument& c_arg, const argument& a_arg, const argument& b_arg,
/// Float entry point for the CPU gemm: computes C = alpha * A * B + beta * C
/// by forwarding to the templated implementation.
/// @param c_arg output matrix argument (read-modified via beta scaling)
/// @param a_arg left-hand matrix argument
/// @param b_arg right-hand matrix argument
/// @param alpha scale applied to the A*B product
/// @param beta  scale applied to the existing contents of C
void migemm(
    const argument& c_arg, const argument& a_arg, const argument& b_arg, float alpha, float beta)
{
    // Must delegate to migemm_tpl: calling migemm here would re-select this
    // same overload and recurse forever (the bug fixed by this change).
    migemm_tpl(c_arg, a_arg, b_arg, alpha, beta);
}
/// int8_t entry point for the CPU gemm (quantized path): computes
/// C = alpha * A * B + beta * C by forwarding to the templated implementation.
/// @param c_arg output matrix argument (read-modified via beta scaling)
/// @param a_arg left-hand matrix argument
/// @param b_arg right-hand matrix argument
/// @param alpha scale applied to the A*B product
/// @param beta  scale applied to the existing contents of C
void migemm(
    const argument& c_arg, const argument& a_arg, const argument& b_arg, int8_t alpha, int8_t beta)
{
    // Must delegate to migemm_tpl: a self-call to migemm would re-enter this
    // overload and never terminate (the bug fixed by this change).
    migemm_tpl(c_arg, a_arg, b_arg, alpha, beta);
}
} // namespace cpu
......
......@@ -876,7 +876,7 @@ struct cpu_apply
apply_map["im2col"] = extend_op<cpu_im2col, op::im2col>();
apply_map["convolution"] = extend_op<cpu_convolution, op::convolution>();
apply_map["dot"] = extend_op<cpu_gemm, op::dot>();
// apply_map["quant_dot"] = extend_op<cpu_quant_gemm, op::quant_dot>();
apply_map["quant_dot"] = extend_op<cpu_quant_gemm, op::quant_dot>();
apply_map["batch_norm_inference"] =
extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
apply_map["lrn"] = extend_op<cpu_lrn, op::lrn>();
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment