Unverified commit a695c186, authored by turboderp, committed by GitHub

Fixes to alternating SWA layers in Gemma2 (#31775)

* HybridCache: Flip order of alternating global-attn/sliding-attn layers

* HybridCache: Read sliding_window argument from cache_kwargs

* Gemma2Model: Flip order of alternating global-attn/sliding-attn layers

* Code formatting
parent d625294d
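For context, the core of the change is a parity flip: sliding-window attention is now assigned to even-indexed layers (0, 2, 4, ...) rather than odd-indexed ones, in both the cache and the model. A minimal before/after sketch of the per-layer flags, assuming a hypothetical 6-layer config purely for illustration:

# Hypothetical 6-layer model, used only to show the parity flip.
num_hidden_layers = 6

# Before this commit: odd-indexed layers were flagged as sliding-window layers.
old_is_sliding = [bool(i % 2) for i in range(num_hidden_layers)]
print(old_is_sliding)  # [False, True, False, True, False, True]

# After this commit: even-indexed layers carry the sliding window.
new_is_sliding = [not bool(i % 2) for i in range(num_hidden_layers)]
print(new_is_sliding)  # [True, False, True, False, True, False]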
@@ -1148,7 +1148,7 @@ class HybridCache(Cache):
             config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads
         )
         self.is_sliding = torch.tensor(
-            [i % 2 for i in range(config.num_hidden_layers)], dtype=torch.bool, device=device
+            [not bool(i % 2) for i in range(config.num_hidden_layers)], dtype=torch.bool, device=device
         )
         self.key_cache: List[torch.Tensor] = []
         self.value_cache: List[torch.Tensor] = []
@@ -1212,9 +1212,9 @@ class HybridCache(Cache):
         value_states: torch.Tensor,
         layer_idx: int,
         cache_kwargs: Optional[Dict[str, Any]] = None,
-        sliding_window: Optional[int] = None,
     ) -> Tuple[torch.Tensor]:
         cache_position = cache_kwargs.get("cache_position")
+        sliding_window = cache_kwargs.get("sliding_window")
         self.key_cache[layer_idx] = self.key_cache[layer_idx].to(device=key_states.device)
         self.value_cache[layer_idx] = self.value_cache[layer_idx].to(device=value_states.device)
         k_out = self.key_cache[layer_idx]
...
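With this hunk, HybridCache.update() no longer takes sliding_window as its own keyword argument; callers put it into cache_kwargs and update() reads it from there. A hedged sketch of the new call site, wrapped in a hypothetical helper (update_kv_cache and its parameter names are illustrative, not copied from modeling_gemma2.py):

def update_kv_cache(past_key_value, key_states, value_states, layer_idx, cache_position, sliding_window):
    # Sketch of the new calling convention: sliding_window travels inside
    # cache_kwargs instead of being a separate keyword argument of update().
    cache_kwargs = {
        "cache_position": cache_position,
        "sliding_window": sliding_window,  # None on global-attention layers
    }
    return past_key_value.update(key_states, value_states, layer_idx, cache_kwargs)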
@@ -216,7 +216,7 @@ class Gemma2Attention(nn.Module):
             max_position_embeddings=self.max_position_embeddings,
             base=self.rope_theta,
         )
-        self.sliding_window = config.sliding_window if layer_idx % 2 else None
+        self.sliding_window = config.sliding_window if not bool(layer_idx % 2) else None

     def forward(
         self,
@@ -616,7 +616,7 @@ class Gemma2DecoderLayer(nn.Module):
         self.input_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
         self.post_attention_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-        self.is_sliding = bool(layer_idx % 2)
+        self.is_sliding = not bool(layer_idx % 2)
         self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
         self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
         self.sliding_window = config.sliding_window
...
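Because the same parity flip is applied in HybridCache, Gemma2Attention, and Gemma2DecoderLayer, the cache-side and layer-side flags now agree on which layers use sliding-window attention. A hedged sanity check, assuming a hypothetical 4-layer config:

import torch

# Illustrative 4-layer config: cache flags and layer flags use the same parity.
num_hidden_layers = 4

cache_is_sliding = torch.tensor(
    [not bool(i % 2) for i in range(num_hidden_layers)], dtype=torch.bool
)
layer_is_sliding = [not bool(layer_idx % 2) for layer_idx in range(num_hidden_layers)]

assert cache_is_sliding.tolist() == layer_is_sliding  # [True, False, True, False]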