Commit d016c513 authored by Casper Hansen

Fix unexpected keyword

parent 3fa74007
@@ -146,7 +146,8 @@ class QuantAttentionFused(nn.Module):
     def forward(
         self,
-        hidden_states:torch.Tensor, past_key_value=None, attention_mask=None, position_ids=None, output_attentions=False, use_cache=False
+        hidden_states:torch.Tensor, past_key_value=None, attention_mask=None, position_ids=None,
+        output_attentions=False, use_cache=False, *args, **kwargs
     ):
         bsz, seqlen, _ = hidden_states.shape
         if bsz != self.cache_batch_size:
...
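Context for the change: a caller of the fused attention module may pass keyword arguments that the old signature did not declare, which raises "TypeError: forward() got an unexpected keyword argument"; accepting *args and **kwargs makes the module tolerant of such callers. Below is a minimal sketch of that pattern, not code from this repository: the class names are illustrative, and padding_mask is only a hypothetical example of an extra keyword a newer caller might forward.

# Minimal sketch (illustrative, not from AutoAWQ): why *args/**kwargs fixes the error.
import torch
import torch.nn as nn

class StrictAttention(nn.Module):
    def forward(self, hidden_states: torch.Tensor, attention_mask=None, use_cache=False):
        # Placeholder computation; any undeclared keyword argument raises TypeError.
        return hidden_states

class TolerantAttention(nn.Module):
    def forward(self, hidden_states: torch.Tensor, attention_mask=None, use_cache=False,
                *args, **kwargs):
        # Extra positional/keyword arguments are accepted and ignored, so callers
        # that add new arguments do not break this module.
        return hidden_states

x = torch.randn(1, 4, 8)
try:
    StrictAttention()(x, attention_mask=None, padding_mask=None)
except TypeError as e:
    print("strict signature fails:", e)

out = TolerantAttention()(x, attention_mask=None, padding_mask=None)
print("tolerant signature works:", out.shape)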