Commit 0b769919 authored by Shucai Xiao

clang format

parent 6dc749f3
@@ -12,9 +12,9 @@ namespace gpu {
 namespace device {
 argument logsoftmax(hipStream_t stream,
                     const migraphx::shape& output_shape,
                     std::vector<migraphx::argument> args,
                     int axis)
 {
     auto lens = output_shape.lens();

@@ -25,28 +25,28 @@ argument logsoftmax(hipStream_t stream,
     migraphx::shape comp_shape{output_shape.type(), {batch_size, n_dims}};
     visit_all(args.back(), args.front())([&](auto output, auto input) {
-        const auto *input_ptr = device_cast(input.data());
-        auto *output_ptr = device_cast(output.data());
+        const auto* input_ptr = device_cast(input.data());
+        auto* output_ptr = device_cast(output.data());
         // each thread is for one item in the batch
         gs_launch(stream, batch_size)([=](auto i) {
             std::size_t row_start = i * n_dims;
             // get max
             auto batch_max = input_ptr[row_start];
-            for (std::size_t j = 1; j < n_dims; ++j)
+            for(std::size_t j = 1; j < n_dims; ++j)
             {
                 auto ind = row_start + j;
                 batch_max = std::max(to_hip_type(batch_max), to_hip_type(input_ptr[ind]));
             }
-            for (std::size_t j = 0; j < n_dims; ++j)
+            for(std::size_t j = 0; j < n_dims; ++j)
             {
                 auto ind = row_start + j;
                 output_ptr[ind] = input_ptr[ind] - batch_max;
             }
             auto batch_sum = output_ptr[row_start];
-            for (std::size_t j = 1; j < n_dims; ++j)
+            for(std::size_t j = 1; j < n_dims; ++j)
             {
                 auto ind = row_start + j;
                 batch_sum += ::exp(to_hip_type(output_ptr[ind]));

@@ -54,7 +54,7 @@ argument logsoftmax(hipStream_t stream,
             batch_sum = ::log(to_hip_type(batch_sum));
-            for (std::size_t j = 0; j < n_dims; ++j)
+            for(std::size_t j = 0; j < n_dims; ++j)
             {
                 auto ind = row_start + j;
                 output_ptr[ind] -= batch_sum;
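For reference, the device code above follows the standard numerically stable formulation of log-softmax over each row of length n_dims: shift every element by the row maximum, then subtract the log of the summed exponentials. In LaTeX:

\operatorname{logsoftmax}(x_j) = (x_j - m) - \log\sum_{k=1}^{n} e^{x_k - m}, \qquad m = \max_{1 \le k \le n} x_k

Subtracting m first keeps the exponentials in a safe range, which is why the kernel writes the shifted values into output_ptr before accumulating the sum.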
@@ -11,9 +11,9 @@ namespace gpu {
 namespace device {
 argument logsoftmax(hipStream_t stream,
                     const migraphx::shape& output_shape,
                     std::vector<migraphx::argument> args,
                     int axis);
 } // namespace device
 } // namespace gpu

@@ -16,8 +16,8 @@ shape hip_logsoftmax::compute_shape(const std::vector<shape>& inputs) const
 }
 argument hip_logsoftmax::compute(context& ctx,
                                  const shape& output_shape,
                                  const std::vector<argument>& args) const
 {
     return device::logsoftmax(ctx.get_stream().get(), output_shape, args, op.axis);
 }
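For anyone checking this kernel on the host, below is a minimal single-row reference in plain C++. It is only a sketch: the function name logsoftmax_row and the use of std::vector are made up for illustration and are not part of MIGraphX; it simply applies the same max-shift and log-sum-exp steps to one contiguous row.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical reference: log-softmax over one contiguous row.
// Find the row max, subtract it from every element, then subtract
// log(sum(exp(shifted))) from every element.
std::vector<double> logsoftmax_row(const std::vector<double>& row)
{
    double row_max = *std::max_element(row.begin(), row.end());

    std::vector<double> out(row.size());
    for(std::size_t j = 0; j < row.size(); ++j)
        out[j] = row[j] - row_max;

    double log_sum = 0.0;
    for(double v : out)
        log_sum += std::exp(v);
    log_sum = std::log(log_sum);

    for(double& v : out)
        v -= log_sum;
    return out;
}

int main()
{
    std::vector<double> row = {1.0, 2.0, 3.0};
    for(double v : logsoftmax_row(row))
        std::cout << v << ' '; // roughly -2.4076 -1.4076 -0.4076
    std::cout << '\n';
}

Each of the batch_size rows handled by gs_launch above covers a slice of length n_dims, so results copied back from the GPU can be compared against this reference row by row.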