Unverified commit c224a4c6, authored by Baizhou Zhang and committed by GitHub

Fix log for chunked prefix cache (#11624)

parent 49345a68
@@ -174,6 +174,15 @@ MLA_ATTENTION_BACKENDS = [
     "nsa",
 ]
 
+CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS = [
+    "flashinfer",
+    "fa3",
+    "fa4",
+    "flashmla",
+    "cutlass_mla",
+    "trtllm_mla",
+]
+
 
 def add_mla_attention_backend(backend_name):
     if backend_name not in MLA_ATTENTION_BACKENDS:
@@ -604,7 +613,11 @@ class ModelRunner:
                 f"{self.model_config.hf_config.model_type}"
             )
 
-        if not self.use_mla_backend:
+        if (
+            not self.use_mla_backend
+            or server_args.attention_backend
+            not in CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS
+        ):
             server_args.disable_chunked_prefix_cache = True
 
         if not server_args.disable_chunked_prefix_cache:
...
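For context, the change widens the guard that force-disables the chunked prefix cache: previously it fired only when the MLA backend was off, so a run on an attention backend without chunked-prefix-cache support could still pass through and log the cache as enabled. Below is a minimal, self-contained sketch of the new gating logic. The backend list and the condition are taken verbatim from the diff; the simplified ServerArgs dataclass and the maybe_disable_chunked_prefix_cache helper are hypothetical stand-ins for illustration, not the actual sglang ModelRunner code.

from dataclasses import dataclass

CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS = [
    "flashinfer",
    "fa3",
    "fa4",
    "flashmla",
    "cutlass_mla",
    "trtllm_mla",
]

@dataclass
class ServerArgs:
    # Hypothetical stand-in for sglang's ServerArgs; only the two
    # fields relevant to this commit are modeled here.
    attention_backend: str
    disable_chunked_prefix_cache: bool = False

def maybe_disable_chunked_prefix_cache(server_args: ServerArgs, use_mla_backend: bool) -> None:
    # Old condition checked only `not use_mla_backend`, so an MLA run on
    # an unsupported backend kept the cache (and its log line) enabled.
    # New condition also requires the backend to be in the supported list.
    if (
        not use_mla_backend
        or server_args.attention_backend
        not in CHUNKED_PREFIX_CACHE_SUPPORTED_ATTENTION_BACKENDS
    ):
        server_args.disable_chunked_prefix_cache = True

# Example: "triton" is not in the supported list, so the cache is now
# disabled even though the MLA backend is in use.
args = ServerArgs(attention_backend="triton")
maybe_disable_chunked_prefix_cache(args, use_mla_backend=True)
assert args.disable_chunked_prefix_cache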