Unverified Commit 8abe8dea authored by Yineng Zhang's avatar Yineng Zhang Committed by GitHub
Browse files

fix: dsv3 lite q_lora_rank none (#9815)

parent 1e85589d
@@ -2414,18 +2414,26 @@ class DeepseekV2ForCausalLM(nn.Module):
             )
             num_hidden_layers = 1 if is_nextn else self.config.num_hidden_layers
             for layer_id in range(num_hidden_layers):
                 if is_nextn:
                     layer = self.model.decoder
                 else:
                     layer = self.model.layers[layer_id]
-                for module in [
-                    layer.self_attn.fused_qkv_a_proj_with_mqa,
-                    layer.self_attn.q_b_proj,
+                module_list = [
                     layer.self_attn.kv_b_proj,
                     layer.self_attn.o_proj,
-                ]:
+                ]
+                if self.config.q_lora_rank is not None:
+                    module_list.append(layer.self_attn.fused_qkv_a_proj_with_mqa)
+                    module_list.append(layer.self_attn.q_b_proj)
+                else:
+                    module_list.append(layer.self_attn.kv_a_proj_with_mqa)
+                    module_list.append(layer.self_attn.q_proj)
+                for module in module_list:
                     requant_weight_ue8m0_inplace(
                         module.weight, module.weight_scale_inv, weight_block_size
                     )
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment