Unverified commit a6cc86df authored by Trevor Morris, committed by GitHub

Fix DSR1 accuracy for flashinfer_trtllm MoE with FP8 quantization (#11081)

parent 229d2b95
@@ -575,9 +575,9 @@ class FusedMoE(torch.nn.Module):
         )
         # Flashinfer assumes w31 format for w13_weight. Same for the scales.
-        if (
-            should_use_flashinfer_trtllm_moe()
-            and self.quant_method.__class__.__name__ == "ModelOptNvFp4FusedMoEMethod"
+        if should_use_flashinfer_trtllm_moe() and (
+            isinstance(self.quant_method, ModelOptNvFp4FusedMoEMethod)
+            or isinstance(self.quant_method, Fp8MoEMethod)
         ):
             shard_id = {"w1": "w3", "w3": "w1", "w2": "w2"}[shard_id]
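For context, a minimal self-contained sketch of the swap this hunk generalizes. The helper name maybe_swap_shard_id and the empty stand-in classes are illustrative only; in the real code the check sits inside FusedMoE weight loading and the quant method classes come from the quantization layers. The point of the change is that an isinstance check, rather than a __class__.__name__ string match against a single class name, lets the FP8 path (Fp8MoEMethod) reuse the same w13-to-w31 shard-id swap that FlashInfer TRTLLM MoE expects. Passing a tuple to isinstance below is equivalent to the two chained isinstance calls in the diff.

class ModelOptNvFp4FusedMoEMethod:  # stand-in for the real NVFP4 quant method
    pass


class Fp8MoEMethod:  # stand-in for the real FP8 quant method
    pass


def maybe_swap_shard_id(shard_id: str, quant_method: object, use_flashinfer_trtllm: bool) -> str:
    # FlashInfer TRTLLM MoE packs the gate/up projections as w31 instead of
    # w13, so the loader must swap which shard a "w1"/"w3" tensor maps to.
    if use_flashinfer_trtllm and isinstance(
        quant_method, (ModelOptNvFp4FusedMoEMethod, Fp8MoEMethod)
    ):
        return {"w1": "w3", "w3": "w1", "w2": "w2"}[shard_id]
    return shard_id


# With the fix, FP8 weights are swapped too; previously only NVFP4 was.
assert maybe_swap_shard_id("w1", Fp8MoEMethod(), True) == "w3"
assert maybe_swap_shard_id("w2", Fp8MoEMethod(), True) == "w2"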
@@ -916,7 +916,7 @@ class ServerArgs:
         if self.moe_runner_backend == "flashinfer_trtllm":
             assert (
-                self.quantization == "modelopt_fp4"
-            ), "modelopt_fp4 quantization is required for Flashinfer TRTLLM MoE"
+                self.quantization == "modelopt_fp4" or self.quantization == "fp8"
+            ), "modelopt_fp4 or fp8 quantization is required for Flashinfer TRTLLM MoE"
             self.disable_shared_experts_fusion = True
             logger.warning(
                 "FlashInfer TRTLLM MoE is enabled. --disable-shared-experts-fusion is automatically set."
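To illustrate the relaxed server-args check, here is a self-contained sketch assuming a stripped-down stand-in for ServerArgs. The constructor and the method name check_moe_runner_backend are hypothetical; in the real code this validation runs during argument post-processing.

import logging

logger = logging.getLogger(__name__)


class ServerArgs:  # minimal stand-in with only the fields this check touches
    def __init__(self, quantization: str, moe_runner_backend: str):
        self.quantization = quantization
        self.moe_runner_backend = moe_runner_backend
        self.disable_shared_experts_fusion = False

    def check_moe_runner_backend(self) -> None:
        if self.moe_runner_backend == "flashinfer_trtllm":
            # After this commit, fp8 is accepted alongside modelopt_fp4.
            assert self.quantization in ("modelopt_fp4", "fp8"), (
                "modelopt_fp4 or fp8 quantization is required for Flashinfer TRTLLM MoE"
            )
            self.disable_shared_experts_fusion = True
            logger.warning(
                "FlashInfer TRTLLM MoE is enabled. "
                "--disable-shared-experts-fusion is automatically set."
            )


# fp8 now passes the check; modelopt_fp4 behaves as before.
ServerArgs("fp8", "flashinfer_trtllm").check_moe_runner_backend()
ServerArgs("modelopt_fp4", "flashinfer_trtllm").check_moe_runner_backend()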