Commit 6d1c23e9 authored by Shucai Xiao

clang format

parent b8782a5f
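Note: the diff below is formatting-only. clang-format drops the space after the `for`, `if`, and `while` keywords, and wraps one over-long `lds_data` assignment in each GPU kernel. A configuration along the following lines would produce these edits; the keys are real clang-format options, but the values shown are inferred from this diff rather than copied from the repository's actual `.clang-format`:

```yaml
# Hypothetical .clang-format sketch inferred from this diff; the
# repository's real configuration may differ.
BasedOnStyle: LLVM
# Produces for(...), if(...), while(...) with no space before the parenthesis.
SpaceBeforeParens: Never
# The wrapped lds_data assignments suggest a column limit near 100 (assumed).
ColumnLimit: 100
```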
@@ -561,26 +561,26 @@ struct cpu_softmax
         par_for(batch_shape.elements(), [&](auto i) {
             auto idx = compute_batch_indices(i, batch_shape);
-            for (size_t j = 0; j < n_dims; ++j)
+            for(size_t j = 0; j < n_dims; ++j)
             {
                 idx[op.axis] = j;
                 batch_max[i] = std::max(batch_max[i], input(idx.begin(), idx.end()));
             }
-            for (size_t j = 0; j < n_dims; ++j)
+            for(size_t j = 0; j < n_dims; ++j)
             {
                 idx[op.axis] = j;
                 size_t index = output_shape.index(idx);
                 output[index] = std::exp(input[index] - batch_max[i]);
             }
-            for (size_t j = 0; j < n_dims; ++j)
+            for(size_t j = 0; j < n_dims; ++j)
             {
                 idx[op.axis] = j;
                 batch_sum[i] += output(idx.begin(), idx.end());
             }
-            for (size_t j = 0; j < n_dims; ++j)
+            for(size_t j = 0; j < n_dims; ++j)
             {
                 idx[op.axis] = j;
                 output(idx.begin(), idx.end()) /= batch_sum[i];
@@ -638,20 +638,20 @@ struct cpu_logsoftmax
         par_for(batch_shape.elements(), [&](auto i) {
             auto idx = compute_batch_indices(i, batch_shape);
-            for (size_t j = 0; j < n_dims; ++j)
+            for(size_t j = 0; j < n_dims; ++j)
             {
                 idx[op.axis] = j;
                 batch_max[i] = std::max(batch_max[i], input(idx.begin(), idx.end()));
             }
-            for (size_t j = 0; j < n_dims; ++j)
+            for(size_t j = 0; j < n_dims; ++j)
             {
                 idx[op.axis] = j;
                 size_t index = output_shape.index(idx);
                 output[index] = input[index] - batch_max[i];
             }
-            for (size_t j = 0; j < n_dims; ++j)
+            for(size_t j = 0; j < n_dims; ++j)
             {
                 idx[op.axis] = j;
                 batch_sum[i] += std::exp(output(idx.begin(), idx.end()));
@@ -659,7 +659,7 @@ struct cpu_logsoftmax
             batch_sum[i] = std::log(batch_sum[i]);
-            for (size_t j = 0; j < n_dims; ++j)
+            for(size_t j = 0; j < n_dims; ++j)
             {
                 idx[op.axis] = j;
                 output(idx.begin(), idx.end()) -= batch_sum[i];
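For context on what these CPU hunks compute (the commit itself only reformats them): the four loops in `cpu_softmax` are the numerically stable softmax (max-shift, exponentiate, sum, normalize), and `cpu_logsoftmax` is the matching log-softmax. A self-contained sketch over a plain `std::vector`, using hypothetical names rather than the MIGraphX tensor API:

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// Stable softmax: shift by the max (as batch_max[i] does above) so
// std::exp cannot overflow, then normalize by the sum (batch_sum[i]).
// Assumes a non-empty input.
std::vector<double> stable_softmax(const std::vector<double>& x)
{
    double m = *std::max_element(x.begin(), x.end());
    std::vector<double> out(x.size());
    double sum = 0.0;
    for(std::size_t j = 0; j < x.size(); ++j)
    {
        out[j] = std::exp(x[j] - m);
        sum += out[j];
    }
    for(auto& v : out)
        v /= sum;
    return out;
}

// Stable log-softmax, matching the cpu_logsoftmax loops: store x - max,
// accumulate exp of the shifted values, then subtract log of the sum.
std::vector<double> stable_logsoftmax(const std::vector<double>& x)
{
    double m = *std::max_element(x.begin(), x.end());
    std::vector<double> out(x.size());
    double sum = 0.0;
    for(std::size_t j = 0; j < x.size(); ++j)
    {
        out[j] = x[j] - m;
        sum += std::exp(out[j]);
    }
    double log_sum = std::log(sum);
    for(auto& v : out)
        v -= log_sum;
    return out;
}
```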
@@ -35,7 +35,7 @@ argument logsoftmax(hipStream_t stream,
     // the current optimization
     const size_t max_block_size = 1024;
     size_t block_size = 1;
-    while (block_size < max_block_size and block_size < n_dim)
+    while(block_size < max_block_size and block_size < n_dim)
     {
         block_size *= 2;
     }
@@ -57,7 +57,7 @@ argument logsoftmax(hipStream_t stream,
         lds_data[block_size] = input_ptr[0];
         for(size_t i = thr_idx; i < thread_num; i += block_size)
         {
-            if (i < n_dims)
+            if(i < n_dims)
             {
                 data_idx[axis] = i;
                 lds_data[thr_idx] = input_ptr[desc_data.linear(data_idx)];
@@ -97,10 +97,11 @@ argument logsoftmax(hipStream_t stream,
             item_num = n_dims;
             for(size_t i = thr_idx; i < thread_num; i += block_size)
            {
-                if (i < n_dims)
+                if(i < n_dims)
                 {
                     data_idx[axis] = i;
-                    lds_data[thr_idx] = input_ptr[desc_data.linear(data_idx)] - lds_data[block_size];
+                    lds_data[thr_idx] =
+                        input_ptr[desc_data.linear(data_idx)] - lds_data[block_size];
                     lds_data[thr_idx] = ::exp(to_hip_type(lds_data[thr_idx]));
                 }
@@ -33,7 +33,7 @@ argument softmax(hipStream_t stream,
     // use one block for items in one batch.
     const size_t max_block_size = 1024;
     size_t block_size = 1;
-    while (block_size < max_block_size and block_size < n_dims)
+    while(block_size < max_block_size and block_size < n_dims)
    {
         block_size *= 2;
     }
@@ -56,7 +56,7 @@ argument softmax(hipStream_t stream,
         lds_data[block_size + 1] = 0;
         for(size_t i = thr_idx; i < thread_num; i += block_size)
         {
-            if (i < n_dims)
+            if(i < n_dims)
             {
                 data_idx[axis] = i;
                 lds_data[thr_idx] = input_ptr[desc_data.linear(data_idx)];
@@ -95,10 +95,11 @@ argument softmax(hipStream_t stream,
             item_num = n_dims;
             for(size_t i = thr_idx; i < thread_num; i += block_size)
             {
-                if (i < n_dims)
+                if(i < n_dims)
                 {
                     data_idx[axis] = i;
-                    lds_data[thr_idx] = input_ptr[desc_data.linear(data_idx)] - lds_data[block_size];
+                    lds_data[thr_idx] =
+                        input_ptr[desc_data.linear(data_idx)] - lds_data[block_size];
                     lds_data[thr_idx] = ::exp(to_hip_type(lds_data[thr_idx]));
                 }
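The `while(block_size < ...)` loops reformatted in both GPU files pick the launch block size as the smallest power of two covering the reduced axis, capped at 1024 threads. A minimal standalone sketch of that computation (the function name is assumed, not the MIGraphX API):

```cpp
#include <cstddef>
#include <iostream>

// Smallest power of two >= n_dims, capped at max_block_size; mirrors the
// loop reformatted in the logsoftmax and softmax hunks above.
std::size_t pick_block_size(std::size_t n_dims, std::size_t max_block_size = 1024)
{
    std::size_t block_size = 1;
    while(block_size < max_block_size and block_size < n_dims)
    {
        block_size *= 2;
    }
    return block_size;
}

int main()
{
    std::cout << pick_block_size(6) << '\n';    // 8
    std::cout << pick_block_size(4000) << '\n'; // 1024
}
```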