"tests/models/autoencoders/test_models_autoencoder_kl.py" did not exist on "8421c1461bf4ab7801070d04d6ec1e6b28ee5b59"
attention.cpp 595 Bytes
Newer Older
1
#include <torch/extension.h>
#include <c10/util/Optional.h>

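// Forward declaration of the paged-attention kernel launcher (implemented in the
// companion CUDA source). For each sequence it attends a single query token over
// that sequence's cached key/value blocks, using block_tables to map logical
// blocks to physical cache blocks and context_lens to bound the attended range;
// alibi_slopes optionally supplies per-head ALiBi biases.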
void single_query_cached_kv_attention(
  torch::Tensor& out,
  torch::Tensor& query,
  torch::Tensor& key_cache,
  torch::Tensor& value_cache,
  float scale,
  torch::Tensor& block_tables,
  torch::Tensor& context_lens,
  int block_size,
  int max_context_len,
  const c10::optional<torch::Tensor>& alibi_slopes);

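// Expose the launcher to Python; the module name (TORCH_EXTENSION_NAME) is
// supplied by the build system when the extension is compiled.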
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def(
    "single_query_cached_kv_attention",
    &single_query_cached_kv_attention,
    "Compute the attention between an input query and the cached key/value tensors");
}
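
// A minimal usage sketch, assuming the extension is built with
// torch.utils.cpp_extension.load together with a CUDA source that defines
// single_query_cached_kv_attention (the "attention_kernels.cu" name below is
// illustrative) and that the input tensors already follow the paged KV-cache
// layout expected by that kernel:
//
//   import torch
//   from torch.utils.cpp_extension import load
//
//   attention_ops = load(
//       name="attention_ops",
//       sources=["attention.cpp", "attention_kernels.cu"],  # hypothetical file list
//   )
//   attention_ops.single_query_cached_kv_attention(
//       out, query, key_cache, value_cache, scale,
//       block_tables, context_lens, block_size, max_context_len,
//       alibi_slopes,  # or None when ALiBi is not used
//   )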