Unverified Commit 0f6ac5e2 authored by Adam Yanxiao Zhao, committed by GitHub

[Bug Fix] Fix Glm4vVisionBlock norm (#9884)

parent 29850900
@@ -93,9 +93,8 @@ class Glm4vVisionBlock(Qwen2_5_VisionBlock):
             quant_config=quant_config,
             prefix=prefix,
             num_dummy_heads=config.num_dummy_heads,
+            rms_norm_eps=config.rms_norm_eps,
         )
-        self.norm1 = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-        self.norm2 = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
         self.mlp = Glm4vVisionMLP(
             config.hidden_size,
@@ -113,12 +113,13 @@ class Qwen2_5_VisionBlock(nn.Module):
         quant_config: Optional[QuantizationConfig] = None,
         prefix: str = "",
         num_dummy_heads: int = 0,
+        rms_norm_eps: float = 1e-6,
     ) -> None:
         super().__init__()
         if norm_layer is None:
             norm_layer = partial(nn.LayerNorm, eps=1e-6)
-        self.norm1 = RMSNorm(dim, eps=1e-6)
-        self.norm2 = RMSNorm(dim, eps=1e-6)
+        self.norm1 = RMSNorm(dim, eps=rms_norm_eps)
+        self.norm2 = RMSNorm(dim, eps=rms_norm_eps)
         if attn_implementation is None:
            softmax_in_single_precision = False
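
For context, the sketch below illustrates the net effect of this change under simplified, hypothetical constructors (the real classes take many more arguments and also build attention and MLP submodules): Qwen2_5_VisionBlock no longer hard-codes eps=1e-6 for its two RMSNorm layers but accepts a new rms_norm_eps argument, and Glm4vVisionBlock forwards config.rms_norm_eps to the parent instead of overriding norm1/norm2 with Glm4vRMSNorm after the parent constructor runs. The RMSNorm class and the config values here are stand-ins for illustration only.

# Minimal sketch of the post-fix behavior; not the repository's actual classes.
from types import SimpleNamespace

import torch
import torch.nn as nn


class RMSNorm(nn.Module):
    """Stand-in for the repository's RMSNorm layer."""

    def __init__(self, dim: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x / rms(x) * weight, the usual RMSNorm formulation.
        variance = x.pow(2).mean(-1, keepdim=True)
        return self.weight * x * torch.rsqrt(variance + self.eps)


class Qwen2_5_VisionBlock(nn.Module):
    # Before this commit the two norms were created with a hard-coded eps=1e-6;
    # the commit threads a configurable rms_norm_eps through the constructor.
    def __init__(self, dim: int, rms_norm_eps: float = 1e-6) -> None:
        super().__init__()
        self.norm1 = RMSNorm(dim, eps=rms_norm_eps)
        self.norm2 = RMSNorm(dim, eps=rms_norm_eps)


class Glm4vVisionBlock(Qwen2_5_VisionBlock):
    # The GLM-4V block no longer replaces norm1/norm2 itself; it simply
    # forwards the model's configured epsilon to the parent constructor.
    def __init__(self, config) -> None:
        super().__init__(dim=config.hidden_size, rms_norm_eps=config.rms_norm_eps)


if __name__ == "__main__":
    # Hypothetical config values for illustration only.
    config = SimpleNamespace(hidden_size=32, rms_norm_eps=1e-5)
    block = Glm4vVisionBlock(config)
    # Both vision-block norms now use the configured epsilon, not 1e-6.
    assert block.norm1.eps == config.rms_norm_eps
    assert block.norm2.eps == config.rms_norm_eps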