Unverified commit a611ac9b authored by JayL0321, committed by GitHub

remove unused is_decoder parameter in DetrAttention (#24226)

* Issue #24161: remove unused is_decoder parameter in DetrAttention

* #24161: fix check_repository_consistency failure

parent 33196b45
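For context, here is a minimal sketch of the simplified constructor. The signature is exactly what remains after the hunks below; the body is an assumption modeled on the standard multi-head attention setup in these modeling files, not verbatim upstream code.

```python
from torch import nn


class DetrAttention(nn.Module):
    """Sketch of DetrAttention.__init__ after this commit: the unused
    `is_decoder` flag is removed from the signature. The body is a
    reconstruction of the usual q/k/v/out projection pattern (an assumption),
    shown only to make the simplified signature concrete."""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if self.head_dim * num_heads != embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got embed_dim: {embed_dim}"
                f" and num_heads: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5

        # Standard attention projections; `bias` toggles their bias terms.
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
```

Call sites simply drop the keyword, e.g. `DetrAttention(embed_dim=256, num_heads=8, dropout=0.1)`; passing `is_decoder=True` would now raise a `TypeError`, which is why every decoder-layer call site is updated in the hunks below.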
@@ -534,7 +534,6 @@ class DetrAttention(nn.Module):
         embed_dim: int,
         num_heads: int,
         dropout: float = 0.0,
-        is_decoder: bool = False,
         bias: bool = True,
     ):
         super().__init__()
...
@@ -499,7 +499,6 @@ class DetrAttention(nn.Module):
         embed_dim: int,
         num_heads: int,
         dropout: float = 0.0,
-        is_decoder: bool = False,
         bias: bool = True,
     ):
         super().__init__()
@@ -697,7 +696,6 @@ class DetrDecoderLayer(nn.Module):
             embed_dim=self.embed_dim,
             num_heads=config.decoder_attention_heads,
             dropout=config.attention_dropout,
-            is_decoder=True,
         )
         self.dropout = config.dropout
         self.activation_fn = ACT2FN[config.activation_function]
@@ -708,7 +706,6 @@ class DetrDecoderLayer(nn.Module):
             self.embed_dim,
             config.decoder_attention_heads,
             dropout=config.attention_dropout,
-            is_decoder=True,
         )
         self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
         self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
...
@@ -416,7 +416,6 @@ class DetrAttention(nn.Module):
         embed_dim: int,
         num_heads: int,
         dropout: float = 0.0,
-        is_decoder: bool = False,
         bias: bool = True,
     ):
         super().__init__()
@@ -545,7 +544,6 @@ class DetrDecoderLayer(nn.Module):
             embed_dim=self.embed_dim,
             num_heads=config.decoder_attention_heads,
             dropout=config.attention_dropout,
-            is_decoder=True,
         )
         self.dropout = config.dropout
         self.activation_fn = ACT2FN[config.activation_function]
@@ -556,7 +554,6 @@ class DetrDecoderLayer(nn.Module):
             self.embed_dim,
             config.decoder_attention_heads,
             dropout=config.attention_dropout,
-            is_decoder=True,
         )
         self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
         self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
...
@@ -440,7 +440,6 @@ class TableTransformerAttention(nn.Module):
         embed_dim: int,
         num_heads: int,
         dropout: float = 0.0,
-        is_decoder: bool = False,
         bias: bool = True,
     ):
         super().__init__()
@@ -642,7 +641,6 @@ class TableTransformerDecoderLayer(nn.Module):
             embed_dim=self.embed_dim,
             num_heads=config.decoder_attention_heads,
             dropout=config.attention_dropout,
-            is_decoder=True,
         )
         self.dropout = config.dropout
         self.activation_fn = ACT2FN[config.activation_function]
@@ -653,7 +651,6 @@ class TableTransformerDecoderLayer(nn.Module):
             self.embed_dim,
             config.decoder_attention_heads,
             dropout=config.attention_dropout,
-            is_decoder=True,
         )
         self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
         self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
...
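The same one-line deletion has to land in several modeling files because Transformers duplicates such classes per model instead of importing them across models, and the check_repository_consistency CI job verifies that the copies stay in sync. A hedged sketch of the convention (the marker text and substitution clause are illustrative, patterned on the hunks above; the exact comments in the repo may differ):

```python
from torch import nn


# Duplicated classes carry a "Copied from" marker like the one below;
# utils/check_copies.py parses these markers and fails the consistency
# check when a marked copy drifts from its source. Removing `is_decoder`
# from DetrAttention therefore forces the identical edit here.
# Copied from transformers.models.detr.modeling_detr.DetrAttention with Detr->TableTransformer
class TableTransformerAttention(nn.Module):
    ...  # body kept in sync with DetrAttention by the consistency check
```

Running `make fix-copies` propagates such edits automatically, which is presumably how the second commit in this PR cleared the failing check.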