Unverified commit 2695ab05 authored by Yun Dai, committed by GitHub

Fix loading KV quantization scale; Enable modelopt kv cache (#4686)


Co-authored-by: qingquansong <ustcsqq@gmail.com>
parent 88d6fd9a
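
Every hunk below makes the same one-line change: the model's attention module now threads its quant_config into the RadixAttention op it constructs, which gives the weight loader a quantization-aware op onto which the per-layer KV-cache scales (k_scale / v_scale) shipped in modelopt FP8 checkpoints can be loaded. A minimal sketch of the pattern follows, assuming the usual sglang import paths; ExampleAttention and its surrounding scaffolding are illustrative, and only the RadixAttention call mirrors the diff.

# Illustrative sketch of the repeated change -- ExampleAttention and the exact
# import paths are assumptions; only the RadixAttention call mirrors the diff.
from typing import Optional

from torch import nn

from sglang.srt.layers.quantization.base_config import QuantizationConfig
from sglang.srt.layers.radix_attention import RadixAttention
from sglang.srt.utils import add_prefix


class ExampleAttention(nn.Module):
    def __init__(
        self,
        num_heads: int,
        num_kv_heads: int,
        head_dim: int,
        layer_id: int = 0,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.scaling = head_dim**-0.5
        self.attn = RadixAttention(
            num_heads,
            head_dim,
            self.scaling,
            num_kv_heads=num_kv_heads,
            layer_id=layer_id,
            # The one-line fix: without quant_config the op is built
            # unquantized, and the checkpoint's k_scale / v_scale values
            # have nowhere to land.
            quant_config=quant_config,
            prefix=add_prefix("attn", prefix),
        )
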
@@ -145,6 +145,7 @@ class InternLM2Attention(nn.Module):
self.scaling,
self.num_kv_heads,
layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -170,6 +170,7 @@ class LlamaAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -146,6 +146,7 @@ class MiniCPMAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -192,6 +192,7 @@ class MiniCPM3Attention(nn.Module):
self.scaling,
num_kv_heads=self.num_local_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -343,6 +344,7 @@ class MiniCPM3AttentionMLA(nn.Module):
num_kv_heads=1,
layer_id=layer_id,
v_head_dim=self.kv_lora_rank,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -169,6 +169,7 @@ class MixtralAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -232,6 +232,7 @@ class MixtralAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -535,6 +535,7 @@ class MllamaTextCrossAttention(nn.Module):
self.num_local_key_value_heads,
layer_id=layer_id,
is_cross_attention=True,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -93,6 +93,7 @@ class OlmoAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -118,6 +118,7 @@ class Olmo2Attention(nn.Module):
self.scaling,
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -170,6 +170,7 @@ class OlmoeAttention(nn.Module):
self.scaling,
layer_id=layer_id,
num_kv_heads=self.num_kv_heads,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -202,6 +202,7 @@ class Phi3SmallSelfAttention(nn.Module):
self.scale,
num_kv_heads=self.num_kv_heads_per_partion,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -133,6 +133,7 @@ class QWenAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -154,6 +154,7 @@ class Qwen2Attention(nn.Module):
self.scaling,
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -231,6 +231,7 @@ class Qwen2MoeAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -149,6 +149,7 @@ class StablelmAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_key_value_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -153,6 +153,7 @@ class XverseAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
@@ -252,6 +252,7 @@ class XverseAttention(nn.Module):
self.scaling,
num_kv_heads=self.num_kv_heads,
layer_id=layer_id,
+quant_config=quant_config,
prefix=add_prefix("attn", prefix),
)
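
Why the op itself needs quant_config: modelopt FP8 checkpoints ship the KV-cache scales as extra per-layer tensors, and something on the model side must own a slot for them at load time. The sketch below is illustrative only; the tensor naming scheme and the loader function are assumptions, not sglang's actual implementation.

# Illustrative only -- not sglang's loader. Shows per-layer KV scales being
# copied onto quantization-aware attention ops after model construction.
import torch
from torch import nn


class QuantAwareAttentionStub(nn.Module):
    """Stand-in for an attention op constructed with an FP8 quant_config."""

    def __init__(self) -> None:
        super().__init__()
        # These buffers only exist because the op knows it is quantized.
        self.register_buffer("k_scale", torch.ones(()))
        self.register_buffer("v_scale", torch.ones(()))


def load_kv_scales(
    attn_ops: dict[int, QuantAwareAttentionStub],
    checkpoint: dict[str, torch.Tensor],
) -> None:
    """Copy k_scale / v_scale tensors onto the matching layer's attention op."""
    for name, tensor in checkpoint.items():
        # Assumed naming scheme: "model.layers.<i>.self_attn.k_scale" etc.
        if not name.endswith(("k_scale", "v_scale")):
            continue
        layer_id = int(name.split(".")[2])
        scale_name = name.rsplit(".", 1)[-1]
        getattr(attn_ops[layer_id], scale_name).copy_(tensor.reshape(()))
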
@@ -37,11 +37,6 @@ DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST = (
DEFAULT_FP8_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST = (
"nvidia/Llama-3.1-8B-Instruct-FP8"
)
-# TODO(yundai424): right now specifying to an older revision since the latest one
-# carries kv cache quantization which doesn't work yet
-DEFAULT_FP8_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST_REVISION = (
-    "13858565416dbdc0b4e7a4a677fadfbd5b9e5bb9"
-)
DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
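
With scale loading fixed, the modelopt FP8 accuracy test no longer needs to pin nvidia/Llama-3.1-8B-Instruct-FP8 to a pre-KV-quantization revision, so the *_REVISION constant above is removed and the latest checkpoint (which carries KV-cache scales) is used directly. A hedged usage sketch, assuming sglang's offline Engine API and that "modelopt" and "fp8_e4m3" are accepted values for the quantization and KV-cache dtype arguments:

# Hedged usage sketch -- argument names and values are assumptions, not
# taken from this diff.
import sglang as sgl

if __name__ == "__main__":
    llm = sgl.Engine(
        model_path="nvidia/Llama-3.1-8B-Instruct-FP8",  # modelopt FP8 checkpoint
        quantization="modelopt",     # assumed quantization backend name
        kv_cache_dtype="fp8_e4m3",   # assumed FP8 KV-cache dtype flag
    )
    print(llm.generate("The capital of France is")["text"])
    llm.shutdown()
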