Commit 7eaffe8f authored by Shucai Xiao
Browse files

clang format

parent 90cfe474
......@@ -15,8 +15,7 @@ struct hip_int8_gemm_pack_a
{
std::string name() const { return "gpu::int8_gemm_pack_a"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape&, const std::vector<argument>& args) const;
argument compute(context& ctx, const shape&, const std::vector<argument>& args) const;
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
return shapes.size() - 1;
......@@ -27,8 +26,7 @@ struct hip_int8_gemm_pack_b
{
std::string name() const { return "gpu::int8_gemm_pack_b"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape&, const std::vector<argument>& args) const;
argument compute(context& ctx, const shape&, const std::vector<argument>& args) const;
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
return shapes.size() - 1;
......
......@@ -12,8 +12,8 @@ shape miopen_int8_conv_pack::compute_shape(const std::vector<shape>& inputs) con
}
argument miopen_int8_conv_pack::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
const shape& output_shape,
const std::vector<argument>& args) const
{
auto arg_desc = make_tensor(args[0].get_shape());
auto arg_desc_vec4 = make_tensor(args[0].get_shape(), true);
......
......@@ -12,9 +12,8 @@ shape hip_int8_gemm_pack_a::compute_shape(const std::vector<shape>& inputs) cons
return inputs.at(0);
}
argument hip_int8_gemm_pack_a::compute(context& ctx,
const shape&,
const std::vector<argument>& args) const
argument
hip_int8_gemm_pack_a::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
device::int8_gemm_pack_a(ctx.get_stream().get(), args[1], args[0]);
return args[1];
......@@ -26,9 +25,8 @@ shape hip_int8_gemm_pack_b::compute_shape(const std::vector<shape>& inputs) cons
return inputs.at(0);
}
argument hip_int8_gemm_pack_b::compute(context& ctx,
const shape&,
const std::vector<argument>& args) const
argument
hip_int8_gemm_pack_b::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
device::int8_gemm_pack_b(ctx.get_stream().get(), args[1], args[0]);
return args[1];
......
......@@ -133,7 +133,7 @@ struct miopen_apply
add_lrn_op();
add_convolution_op();
add_quant_convolution_op();
//add_quant_dot_op();
// add_quant_dot_op();
add_pooling_op();
add_batch_norm_inference_op();
}
......@@ -187,12 +187,14 @@ struct miopen_apply
auto conv = miopen_quant_convolution{op, make_conv(op)};
auto ws = conv.compile(ctx, ins->get_shape(), to_shapes(ins->inputs()));
auto args = ins->inputs();
auto args = ins->inputs();
auto arg_x_vec4 = insert_allocation(ins, conv.pack_int8_shape(args[0]->get_shape()));
auto arg_x_packed = prog->insert_instruction(ins, miopen_int8_conv_pack{}, {args[0], arg_x_vec4});
auto arg_x_packed =
prog->insert_instruction(ins, miopen_int8_conv_pack{}, {args[0], arg_x_vec4});
auto arg_y_vec4 = insert_allocation(ins, conv.pack_int8_shape(args[1]->get_shape()));
auto arg_y_packed = prog->insert_instruction(ins, miopen_int8_conv_pack{}, {args[1], arg_y_vec4});
auto arg_y_packed =
prog->insert_instruction(ins, miopen_int8_conv_pack{}, {args[1], arg_y_vec4});
auto workspace = insert_allocation(ins, ws, "workspace");
auto output = insert_allocation(ins, ins->get_shape());
......
......@@ -16,26 +16,26 @@ argument miopen_quant_convolution::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
auto x_desc = make_tensor(args[0].get_shape(), true);
auto w_desc = make_tensor(args[1].get_shape(), true);
auto y_desc = make_tensor(output_shape);
auto x_desc = make_tensor(args[0].get_shape(), true);
auto w_desc = make_tensor(args[1].get_shape(), true);
auto y_desc = make_tensor(output_shape);
float alpha = 1;
float beta = 0;
auto status = miopenConvolutionForward(ctx.get_stream().get_miopen(),
&alpha,
x_desc.get(),
args[0].implicit(),
w_desc.get(),
args[1].implicit(),
cd.get(),
algo,
&beta,
y_desc.get(),
args[3].implicit(),
args[2].implicit(),
args[2].get_shape().bytes());
&alpha,
x_desc.get(),
args[0].implicit(),
w_desc.get(),
args[1].implicit(),
cd.get(),
algo,
&beta,
y_desc.get(),
args[3].implicit(),
args[2].implicit(),
args[2].get_shape().bytes());
if(status != miopenStatusSuccess)
{
MIGRAPHX_THROW("QUANT_CONVOLUTION: run convolution forward failed");
......@@ -62,10 +62,10 @@ shape miopen_quant_convolution::compile(context& ctx,
&workspace_size);
workspace_shape = shape{shape::int8_type, {workspace_size}};
auto arg_vec4_x = to_gpu(generate_argument(pack_int8_shape(inputs[0])));
auto arg_vec4_w = to_gpu(generate_argument(pack_int8_shape(inputs[1])));
auto y = allocate_gpu(output_shape);
auto workspace = allocate_gpu(workspace_shape);
auto arg_vec4_x = to_gpu(generate_argument(pack_int8_shape(inputs[0])));
auto arg_vec4_w = to_gpu(generate_argument(pack_int8_shape(inputs[1])));
auto y = allocate_gpu(output_shape);
auto workspace = allocate_gpu(workspace_shape);
int algo_count = 1;
miopenConvAlgoPerf_t perf;
......
......@@ -61,8 +61,8 @@ shape rocblas_quant_gemm::compute_shape(const std::vector<shape>& inputs) const
}
argument rocblas_quant_gemm::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
const shape& output_shape,
const std::vector<argument>& args) const
{
bool transa = args[0].get_shape().transposed();
bool transb = args[1].get_shape().transposed();
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment