Unverified Commit 3a765bd5 authored by Woosuk Kwon, committed by GitHub

Temporarily enforce eager mode for GPTQ models (#2154)

parent 26c52a5e
@@ -185,6 +185,11 @@ class ModelConfig:
             self.max_context_len_to_capture = self.max_model_len
         self.max_context_len_to_capture = min(self.max_context_len_to_capture,
                                               self.max_model_len)
+        if self.quantization == "gptq" and not self.enforce_eager:
+            # Related issue: https://github.com/vllm-project/vllm/issues/2147
+            logger.warning("GPTQ does not support CUDA graph yet. Disabling "
+                           "CUDA graph.")
+            self.enforce_eager = True
 
     def verify_with_parallel_config(
         self,
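For context, a minimal usage sketch of how this change surfaces to users (the model name and prompt below are illustrative examples, not taken from this commit): loading a GPTQ-quantized model through vLLM's LLM entry point now logs the warning above and falls back to eager execution, even when enforce_eager is left at its default of False.

    # Illustrative sketch; model name and prompt are example values.
    from vllm import LLM, SamplingParams

    # With quantization="gptq", ModelConfig flips enforce_eager to True
    # and logs "GPTQ does not support CUDA graph yet. Disabling CUDA graph."
    llm = LLM(model="TheBloke/Llama-2-7B-Chat-GPTQ", quantization="gptq")
    outputs = llm.generate(["Hello, my name is"],
                           SamplingParams(max_tokens=16))
    print(outputs[0].outputs[0].text)

The fallback is applied in ModelConfig rather than at the engine level, so every entry point (offline LLM, API server) gets the same behavior until the underlying CUDA graph issue (#2147) is resolved.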