#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>

#include "cuda_compat.h"
#include "dispatch_utils.h"

namespace vllm {

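// Rotates one (x, y) pair of a head vector by the angle cached for this
// position: x' = x * cos - y * sin, y' = y * cos + x * sin.
// IS_NEOX == true pairs element i with element i + embed_dim (the two halves
// of the rotary dimensions); IS_NEOX == false pairs adjacent elements 2i and
// 2i + 1 (GPT-J style interleaving).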
template<typename scalar_t, bool IS_NEOX>
inline __device__ void apply_rotary_embedding(
  scalar_t* __restrict__ arr,
  const scalar_t* __restrict__ cos_ptr,
  const scalar_t* __restrict__ sin_ptr,
  int rot_offset,
  int embed_dim)
{
  int x_index, y_index;
  scalar_t cos, sin;
  if (IS_NEOX) {
    // GPT-NeoX style rotary embedding.
    x_index = rot_offset;
    y_index = embed_dim + rot_offset;
    cos = VLLM_LDG(cos_ptr + x_index);
    sin = VLLM_LDG(sin_ptr + x_index);
  } else {
    // GPT-J style rotary embedding.
    x_index = 2 * rot_offset;
    y_index = 2 * rot_offset + 1;
    cos = VLLM_LDG(cos_ptr + x_index / 2);
    sin = VLLM_LDG(sin_ptr + x_index / 2);
  }

  const scalar_t x = arr[x_index];
  const scalar_t y = arr[y_index];
  arr[x_index] = x * cos - y * sin;
  arr[y_index] = y * cos + x * sin;
}

template<typename scalar_t, bool IS_NEOX>
__global__ void rotary_embedding_kernel(
  const int64_t* __restrict__ positions,        // [batch_size, seq_len] or [num_tokens]
  scalar_t* __restrict__ query,                 // [batch_size, seq_len, num_heads, head_size] or [num_tokens, num_heads, head_size]
  scalar_t* __restrict__ key,                   // [batch_size, seq_len, num_kv_heads, head_size] or [num_tokens, num_kv_heads, head_size]
  const scalar_t* __restrict__ cos_sin_cache,   // [max_position, 2, rot_dim // 2]
  const int rot_dim,
  const int64_t query_stride,
  const int64_t key_stride,
  const int num_heads,
  const int num_kv_heads,
  const int head_size) {
  // Each thread block is responsible for one token.
  const int token_idx = blockIdx.x;
  int64_t pos = positions[token_idx];
  const scalar_t* cache_ptr = cos_sin_cache + pos * rot_dim;

  const int embed_dim = rot_dim / 2;
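  // Row `pos` of the cache holds embed_dim cosines followed by embed_dim sines.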
  const scalar_t* cos_ptr = cache_ptr;
  const scalar_t* sin_ptr = cache_ptr + embed_dim;

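  // Each thread handles one (head index, rotation offset) pair at a time,
  // striding over all num_heads * embed_dim pairs of the query.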
  const int nq = num_heads * embed_dim;
  for (int i = threadIdx.x; i < nq; i += blockDim.x) {
    const int head_idx = i / embed_dim;
    const int64_t token_head = token_idx * query_stride + head_idx * head_size;
    const int rot_offset = i % embed_dim;
    apply_rotary_embedding<scalar_t, IS_NEOX>(query + token_head, cos_ptr,
                                              sin_ptr, rot_offset, embed_dim);
  }

  const int nk = num_kv_heads * embed_dim;
  for (int i = threadIdx.x; i < nk; i += blockDim.x) {
    const int head_idx = i / embed_dim;
    const int64_t token_head = token_idx * key_stride + head_idx * head_size;
    const int rot_offset = i % embed_dim;
    apply_rotary_embedding<scalar_t, IS_NEOX>(key + token_head, cos_ptr,
                                              sin_ptr, rot_offset, embed_dim);
  }
}

} // namespace vllm

void rotary_embedding(
  torch::Tensor& positions,         // [batch_size, seq_len] or [num_tokens]
  torch::Tensor& query,             // [batch_size, seq_len, num_heads * head_size] or [num_tokens, num_heads * head_size]
  torch::Tensor& key,               // [batch_size, seq_len, num_kv_heads * head_size] or [num_tokens, num_kv_heads * head_size]
  int head_size,
  torch::Tensor& cos_sin_cache,     // [max_position, rot_dim]
  bool is_neox) {
  int64_t num_tokens = query.numel() / query.size(-1);
  int rot_dim = cos_sin_cache.size(1);
  int num_heads = query.size(-1) / head_size;
  int num_kv_heads = key.size(-1) / head_size;
  int64_t query_stride = query.stride(-2);
  int64_t key_stride = key.stride(-2);

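  // One thread block per token; up to 512 threads cover the
  // num_heads * rot_dim / 2 rotation pairs of the query.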
  dim3 grid(num_tokens);
  dim3 block(std::min(num_heads * rot_dim / 2, 512));
  const at::cuda::OptionalCUDAGuard device_guard(device_of(query));
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
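  // Instantiate the kernel for the query's floating-point dtype and for the
  // requested rotary style (GPT-NeoX or GPT-J).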
  VLLM_DISPATCH_FLOATING_TYPES(
    query.scalar_type(),
    "rotary_embedding",
    [&] {
      if (is_neox) {
        vllm::rotary_embedding_kernel<scalar_t, true><<<grid, block, 0, stream>>>(
          positions.data_ptr<int64_t>(),
          query.data_ptr<scalar_t>(),
          key.data_ptr<scalar_t>(),
          cos_sin_cache.data_ptr<scalar_t>(),
          rot_dim,
          query_stride,
          key_stride,
          num_heads,
          num_kv_heads,
          head_size);
      } else {
        vllm::rotary_embedding_kernel<scalar_t, false><<<grid, block, 0, stream>>>(
          positions.data_ptr<int64_t>(),
          query.data_ptr<scalar_t>(),
          key.data_ptr<scalar_t>(),
          cos_sin_cache.data_ptr<scalar_t>(),
          rot_dim,
          query_stride,
          key_stride,
          num_heads,
          num_kv_heads,
          head_size);
      }
    });
}
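
// ---------------------------------------------------------------------------
// Illustrative host-side usage sketch (not part of the upstream kernel file).
// It shows one way a caller might build a GPT-NeoX style cos/sin cache and
// apply the rotation in place. The function name and all hyper-parameters
// below are hypothetical; only rotary_embedding() itself comes from this file.
// ---------------------------------------------------------------------------
void example_rotary_embedding_usage() {
  const int head_size = 128;
  const int rot_dim = 128;      // rotate the full head dimension
  const int max_position = 4096;
  const int num_heads = 32;
  const int num_kv_heads = 8;
  const int64_t num_tokens = 16;

  const auto opts =
      torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);

  // inv_freq[i] = 10000^(-2i / rot_dim) for i in [0, rot_dim / 2).
  torch::Tensor inv_freq = torch::pow(
      10000.0, -torch::arange(0, rot_dim, 2, torch::kFloat32) / rot_dim);
  torch::Tensor t = torch::arange(max_position, torch::kFloat32);
  // freqs[pos][i] = pos * inv_freq[i]  -> [max_position, rot_dim / 2]
  torch::Tensor freqs = t.unsqueeze(1) * inv_freq.unsqueeze(0);
  // Layout expected by the kernel: the cos half followed by the sin half.
  torch::Tensor cos_sin_cache =
      torch::cat({freqs.cos(), freqs.sin()}, /*dim=*/-1).to(opts);

  torch::Tensor positions = torch::arange(
      num_tokens, torch::TensorOptions().dtype(torch::kInt64).device(torch::kCUDA));
  torch::Tensor query = torch::randn({num_tokens, num_heads * head_size}, opts);
  torch::Tensor key = torch::randn({num_tokens, num_kv_heads * head_size}, opts);

  // Rotates query and key in place.
  rotary_embedding(positions, query, key, head_size, cos_sin_cache, /*is_neox=*/true);
}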