Unverified Commit 2acd76f3 authored by Woosuk Kwon's avatar Woosuk Kwon Committed by GitHub
Browse files

[ROCm] Temporarily remove GPTQ ROCm support (#2138)

parent b81a6a6b
......@@ -219,13 +219,13 @@ vllm_extension_sources = [
"csrc/activation_kernels.cu",
"csrc/layernorm_kernels.cu",
"csrc/quantization/squeezellm/quant_cuda_kernel.cu",
"csrc/quantization/gptq/q_gemm.cu",
"csrc/cuda_utils_kernels.cu",
"csrc/pybind.cpp",
]
if _is_cuda():
vllm_extension_sources.append("csrc/quantization/awq/gemm_kernels.cu")
vllm_extension_sources.append("csrc/quantization/gptq/q_gemm.cu")
vllm_extension = CUDAExtension(
name="vllm._C",
......
......@@ -143,7 +143,7 @@ class ModelConfig:
def _verify_quantization(self) -> None:
supported_quantization = ["awq", "gptq", "squeezellm"]
rocm_not_supported_quantization = ["awq"]
rocm_not_supported_quantization = ["awq", "gptq"]
if self.quantization is not None:
self.quantization = self.quantization.lower()
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment