Commit 58356312 authored by coder-chin's avatar coder-chin Committed by binmakeswell
Browse files

fix format (#564)

parent e014144c
...@@ -120,7 +120,7 @@ __global__ void ker_attn_softmax(T *inp, const T *attn_mask, int from_len, ...@@ -120,7 +120,7 @@ __global__ void ker_attn_softmax(T *inp, const T *attn_mask, int from_len,
BlockStore(ts_store).Store(inp + (token_id + i) * to_len, inp_val[i], BlockStore(ts_store).Store(inp + (token_id + i) * to_len, inp_val[i],
to_len); to_len);
} }
} // blockIdx.x } // blockIdx.x
} }
template <typename T, int block_dim, int ele_per_thread> template <typename T, int block_dim, int ele_per_thread>
...@@ -198,7 +198,7 @@ __global__ void ker_attn_softmax_lt32(T *inp, const T *attn_mask, int from_len, ...@@ -198,7 +198,7 @@ __global__ void ker_attn_softmax_lt32(T *inp, const T *attn_mask, int from_len,
BlockStore(ts_store).Store(inp + (token_id + i) * to_len, inp_val[i], BlockStore(ts_store).Store(inp + (token_id + i) * to_len, inp_val[i],
to_len); to_len);
} }
} // blockIdx.x } // blockIdx.x
} }
/* /*
...@@ -304,7 +304,8 @@ __global__ void ker_attn_softmax_bw(T *grad, const T *inp, int softmax_length) { ...@@ -304,7 +304,8 @@ __global__ void ker_attn_softmax_bw(T *grad, const T *inp, int softmax_length) {
cg::thread_block b = cg::this_thread_block(); cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); for (int i = 1; i < WARP_SIZE; i <<= 1)
sum += g.shfl_xor(sum, i);
#pragma unroll #pragma unroll
for (int i = 0; i < ITERATIONS; ++i) { for (int i = 0; i < ITERATIONS; ++i) {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment