"...git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "892f9ea0db18d1bef81ea45221f53745c03509f5"
Unverified commit b8aee2e9, authored by David and committed by GitHub

Remove unused modules from DETR-based models (#30823)

* Remove the unused classification heads from the DETR-based models.

* Quality fix
parent be3aa43e
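The five hunks below delete the same tanh-pooled classification head from each of the DETR-family modeling files (Conditional DETR, Deformable DETR, DETA, DETR, Table Transformer); the head was copied from DETR into the other models but is never instantiated anywhere in those files. As a rough sanity check (not part of this PR), a small script like the following can list classes a modeling file defines but never references by name; the script name and invocation are illustrative only:

```python
import ast
import sys

# Rough, file-local check (illustrative, not part of this PR): report classes
# that a module defines but never references by name anywhere in the same file.
# Usage (path is an example): python find_unused_classes.py modeling_detr.py
source = open(sys.argv[1], encoding="utf-8").read()
tree = ast.parse(source)

defined = {node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)}
referenced = {node.id for node in ast.walk(tree) if isinstance(node, ast.Name)}

for name in sorted(defined - referenced):
    # A class can still be used from other files (imports, docs, tests),
    # so treat every hit only as a candidate for manual review.
    print(f"candidate for removal: {name}")
```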
@@ -1091,25 +1091,6 @@ class ConditionalDetrDecoderLayer(nn.Module):
         return outputs
 
 
-# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead with Detr->ConditionalDetr
-class ConditionalDetrClassificationHead(nn.Module):
-    """Head for sentence-level classification tasks."""
-
-    def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
-        super().__init__()
-        self.dense = nn.Linear(input_dim, inner_dim)
-        self.dropout = nn.Dropout(p=pooler_dropout)
-        self.out_proj = nn.Linear(inner_dim, num_classes)
-
-    def forward(self, hidden_states: torch.Tensor):
-        hidden_states = self.dropout(hidden_states)
-        hidden_states = self.dense(hidden_states)
-        hidden_states = torch.tanh(hidden_states)
-        hidden_states = self.dropout(hidden_states)
-        hidden_states = self.out_proj(hidden_states)
-        return hidden_states
-
-
 # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with DetrMLPPredictionHead->MLP
 class MLP(nn.Module):
     """

@@ -1066,25 +1066,6 @@ class DeformableDetrDecoderLayer(nn.Module):
         return outputs
 
 
-# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead
-class DeformableDetrClassificationHead(nn.Module):
-    """Head for sentence-level classification tasks."""
-
-    def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
-        super().__init__()
-        self.dense = nn.Linear(input_dim, inner_dim)
-        self.dropout = nn.Dropout(p=pooler_dropout)
-        self.out_proj = nn.Linear(inner_dim, num_classes)
-
-    def forward(self, hidden_states: torch.Tensor):
-        hidden_states = self.dropout(hidden_states)
-        hidden_states = self.dense(hidden_states)
-        hidden_states = torch.tanh(hidden_states)
-        hidden_states = self.dropout(hidden_states)
-        hidden_states = self.out_proj(hidden_states)
-        return hidden_states
-
-
 class DeformableDetrPreTrainedModel(PreTrainedModel):
     config_class = DeformableDetrConfig
     base_model_prefix = "model"

@@ -1032,25 +1032,6 @@ class DetaDecoderLayer(nn.Module):
         return outputs
 
 
-# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead
-class DetaClassificationHead(nn.Module):
-    """Head for sentence-level classification tasks."""
-
-    def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
-        super().__init__()
-        self.dense = nn.Linear(input_dim, inner_dim)
-        self.dropout = nn.Dropout(p=pooler_dropout)
-        self.out_proj = nn.Linear(inner_dim, num_classes)
-
-    def forward(self, hidden_states: torch.Tensor):
-        hidden_states = self.dropout(hidden_states)
-        hidden_states = self.dense(hidden_states)
-        hidden_states = torch.tanh(hidden_states)
-        hidden_states = self.dropout(hidden_states)
-        hidden_states = self.out_proj(hidden_states)
-        return hidden_states
-
-
 class DetaPreTrainedModel(PreTrainedModel):
     config_class = DetaConfig
     base_model_prefix = "model"

@@ -875,24 +875,6 @@ class DetrDecoderLayer(nn.Module):
         return outputs
 
 
-class DetrClassificationHead(nn.Module):
-    """Head for sentence-level classification tasks."""
-
-    def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
-        super().__init__()
-        self.dense = nn.Linear(input_dim, inner_dim)
-        self.dropout = nn.Dropout(p=pooler_dropout)
-        self.out_proj = nn.Linear(inner_dim, num_classes)
-
-    def forward(self, hidden_states: torch.Tensor):
-        hidden_states = self.dropout(hidden_states)
-        hidden_states = self.dense(hidden_states)
-        hidden_states = torch.tanh(hidden_states)
-        hidden_states = self.dropout(hidden_states)
-        hidden_states = self.out_proj(hidden_states)
-        return hidden_states
-
-
 class DetrPreTrainedModel(PreTrainedModel):
     config_class = DetrConfig
     base_model_prefix = "model"

@@ -782,25 +782,6 @@ class TableTransformerDecoderLayer(nn.Module):
         return outputs
 
 
-# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead with Detr->TableTransformer
-class TableTransformerClassificationHead(nn.Module):
-    """Head for sentence-level classification tasks."""
-
-    def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
-        super().__init__()
-        self.dense = nn.Linear(input_dim, inner_dim)
-        self.dropout = nn.Dropout(p=pooler_dropout)
-        self.out_proj = nn.Linear(inner_dim, num_classes)
-
-    def forward(self, hidden_states: torch.Tensor):
-        hidden_states = self.dropout(hidden_states)
-        hidden_states = self.dense(hidden_states)
-        hidden_states = torch.tanh(hidden_states)
-        hidden_states = self.dropout(hidden_states)
-        hidden_states = self.out_proj(hidden_states)
-        return hidden_states
-
-
 class TableTransformerPreTrainedModel(PreTrainedModel):
     config_class = TableTransformerConfig
     base_model_prefix = "model"