Unverified commit 1effba4c, authored by Michael Feil, committed by GitHub

Configuration qwen2_moe.py - qkv_bias now in transformers (#5512)

parent a0fc5bc1
@@ -262,8 +262,7 @@ class Qwen2MoeDecoderLayer(nn.Module):
         rope_theta = getattr(config, "rope_theta", 10000)
         rope_scaling = getattr(config, "rope_scaling", None)
         max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
-        # note: replace config.num_hidden_layers < 80 with True once its available in transformers 4.50.0
-        qkv_bias = getattr(config, "qkv_bias", config.num_hidden_layers < 80)
+        qkv_bias = getattr(config, "qkv_bias", True)
         self.self_attn = Qwen2MoeAttention(
             hidden_size=self.hidden_size,
             num_heads=config.num_attention_heads,
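For context, here is a minimal sketch of the fallback behavior this hunk changes. Per the commit message, `transformers` now exposes `qkv_bias` on the Qwen2-MoE config, so the temporary `num_hidden_layers < 80` heuristic can be replaced with a plain default of `True`. The `SimpleNamespace` stand-ins below are illustrative only, not transformers' real `Qwen2MoeConfig`:

```python
from types import SimpleNamespace


def resolve_qkv_bias(config) -> bool:
    # New behavior from this commit: honor config.qkv_bias when the
    # config defines it, otherwise default to True.
    return getattr(config, "qkv_bias", True)


# Hypothetical configs standing in for older and newer transformers versions.
old_config = SimpleNamespace(num_hidden_layers=24)                 # no qkv_bias attribute
new_config = SimpleNamespace(num_hidden_layers=24, qkv_bias=False)  # attribute present

print(resolve_qkv_bias(old_config))  # True, via the fallback default
print(resolve_qkv_bias(new_config))  # False, taken from the config itself
```

The design point is that `getattr` with a default keeps the loader compatible in both directions: configs written by a transformers version that knows about `qkv_bias` are obeyed exactly, while older configs silently get the default instead of raising `AttributeError`.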