Unverified Commit a564d10a authored by amyeroberts, committed by GitHub

Deprecate low use models (#30781)

* Deprecate models
- graphormer
- time_series_transformer
- xlm_prophetnet
- qdqbert
- nat
- ernie_m
- tvlt
- nezha
- mega
- jukebox
- vit_hybrid
- x_clip
- deta
- speech_to_text_2
- efficientformer
- realm
- gptsan_japanese

* Fix up

* Fix speech2text2 imports

* Make sure message isn't indented

* Fix docstrings

* Correctly map deprecated models from model_type

* Uncomment

* Add back time series transformer and x-clip

* Import fix and fix-up

* Fix up with updated ruff
parent 7f08817b
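Most of the file changes below are mechanical: each deprecated model directory moves from src/transformers/models/<name>/ to src/transformers/models/deprecated/<name>/, so every relative import inside it gains one level (three leading dots become four). A minimal sketch of what this looks like from user code, assuming the top-level exports stay available for the deprecation window (as with earlier deprecations) and using vit_hybrid as in the hunks below:

# Inside the moved module, relative imports gain one dot:
#   before (models/vit_hybrid/):            from ...configuration_utils import PretrainedConfig
#   after  (models/deprecated/vit_hybrid/): from ....configuration_utils import PretrainedConfig
# From user code, the public name is assumed to keep working; only the module path changes:
from transformers import ViTHybridConfig  # top-level export, assumed to remain during deprecation
from transformers.models.deprecated.vit_hybrid import ViTHybridConfig as _SameConfig  # new module path

config = ViTHybridConfig()
print(type(config).__name__)  # "ViTHybridConfig"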
......@@ -13,7 +13,7 @@
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_vit_hybrid": ["ViTHybridConfig"]}
......
......@@ -14,10 +14,10 @@
# limitations under the License.
"""ViT Hybrid model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
from ..bit import BitConfig
from ....configuration_utils import PretrainedConfig
from ....utils import logging
from ...auto.configuration_auto import CONFIG_MAPPING
from ...bit import BitConfig
logger = logging.get_logger(__name__)
......
......@@ -18,14 +18,14 @@ from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
from ....image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ....image_transforms import (
convert_to_rgb,
get_resize_output_image_size,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
from ....image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
......@@ -39,7 +39,7 @@ from ...image_utils import (
validate_kwargs,
validate_preprocess_arguments,
)
from ...utils import TensorType, is_vision_available, logging
from ....utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
......
......@@ -23,12 +23,12 @@ import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from ...utils.backbone_utils import load_backbone
from ....activations import ACT2FN
from ....modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ....modeling_utils import PreTrainedModel
from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from ....utils.backbone_utils import load_backbone
from .configuration_vit_hybrid import ViTHybridConfig
......
......@@ -13,7 +13,7 @@
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
_import_structure = {
......
......@@ -16,8 +16,8 @@
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
......
......@@ -25,10 +25,10 @@ import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import LayerNorm
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import (
from ....activations import ACT2FN
from ....modeling_outputs import BaseModelOutput
from ....modeling_utils import PreTrainedModel
from ....utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
......
......@@ -18,8 +18,8 @@ import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ....tokenization_utils import PreTrainedTokenizer
from ....utils import logging
logger = logging.get_logger(__name__)
......
......@@ -71,7 +71,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
@dataclass
# Copied from transformers.models.nat.modeling_nat.NatEncoderOutput with Nat->Dinat
class DinatEncoderOutput(ModelOutput):
"""
Dinat encoder's outputs, with potential hidden states and attentions.
......@@ -105,7 +104,6 @@ class DinatEncoderOutput(ModelOutput):
@dataclass
# Copied from transformers.models.nat.modeling_nat.NatModelOutput with Nat->Dinat
class DinatModelOutput(ModelOutput):
"""
Dinat model's outputs that also contains a pooling of the last hidden states.
......@@ -142,7 +140,6 @@ class DinatModelOutput(ModelOutput):
@dataclass
# Copied from transformers.models.nat.modeling_nat.NatImageClassifierOutput with Nat->Dinat
class DinatImageClassifierOutput(ModelOutput):
"""
Dinat outputs for image classification.
......@@ -178,7 +175,6 @@ class DinatImageClassifierOutput(ModelOutput):
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
# Copied from transformers.models.nat.modeling_nat.NatEmbeddings with Nat->Dinat
class DinatEmbeddings(nn.Module):
"""
Construct the patch and position embeddings.
......@@ -201,7 +197,6 @@ class DinatEmbeddings(nn.Module):
return embeddings
# Copied from transformers.models.nat.modeling_nat.NatPatchEmbeddings with Nat->Dinat
class DinatPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
......@@ -238,7 +233,6 @@ class DinatPatchEmbeddings(nn.Module):
return embeddings
# Copied from transformers.models.nat.modeling_nat.NatDownsampler with Nat->Dinat
class DinatDownsampler(nn.Module):
"""
Convolutional Downsampling Layer.
......@@ -321,7 +315,6 @@ class NeighborhoodAttention(nn.Module):
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
# Copied from transformers.models.nat.modeling_nat.NeighborhoodAttention.transpose_for_scores with Nat->Dinat
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
......@@ -361,7 +354,6 @@ class NeighborhoodAttention(nn.Module):
return outputs
# Copied from transformers.models.nat.modeling_nat.NeighborhoodAttentionOutput
class NeighborhoodAttentionOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
......@@ -382,7 +374,6 @@ class NeighborhoodAttentionModule(nn.Module):
self.output = NeighborhoodAttentionOutput(config, dim)
self.pruned_heads = set()
# Copied from transformers.models.nat.modeling_nat.NeighborhoodAttentionModule.prune_heads
def prune_heads(self, heads):
if len(heads) == 0:
return
......@@ -401,7 +392,6 @@ class NeighborhoodAttentionModule(nn.Module):
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
# Copied from transformers.models.nat.modeling_nat.NeighborhoodAttentionModule.forward
def forward(
self,
hidden_states: torch.Tensor,
......@@ -413,7 +403,6 @@ class NeighborhoodAttentionModule(nn.Module):
return outputs
# Copied from transformers.models.nat.modeling_nat.NatIntermediate with Nat->Dinat
class DinatIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
......@@ -429,7 +418,6 @@ class DinatIntermediate(nn.Module):
return hidden_states
# Copied from transformers.models.nat.modeling_nat.NatOutput with Nat->Dinat
class DinatOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
......@@ -539,7 +527,6 @@ class DinatStage(nn.Module):
self.pointing = False
# Copied from transformers.models.nat.modeling_nat.NatStage.forward
def forward(
self,
hidden_states: torch.Tensor,
......@@ -582,7 +569,6 @@ class DinatEncoder(nn.Module):
]
)
# Copied from transformers.models.nat.modeling_nat.NatEncoder.forward with Nat->Dinat
def forward(
self,
hidden_states: torch.Tensor,
......@@ -687,7 +673,6 @@ DINAT_INPUTS_DOCSTRING = r"""
"The bare Dinat Model transformer outputting raw hidden-states without any specific head on top.",
DINAT_START_DOCSTRING,
)
# Copied from transformers.models.nat.modeling_nat.NatModel with Nat->Dinat, NAT->DINAT
class DinatModel(DinatPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
......
This diff is collapsed.
......@@ -72,6 +72,13 @@ class ErnieMTokenizer(metaclass=DummyObject):
requires_backends(self, ["sentencepiece"])
class XLMProphetNetTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class FNetTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
......@@ -233,13 +240,6 @@ class XGLMTokenizer(metaclass=DummyObject):
requires_backends(self, ["sentencepiece"])
class XLMProphetNetTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class XLMRobertaTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
......
......@@ -1038,168 +1038,168 @@ class TFDeiTPreTrainedModel(metaclass=DummyObject):
requires_backends(self, ["tf"])
class TFAdaptiveEmbedding(metaclass=DummyObject):
class TFEfficientFormerForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTransfoXLForSequenceClassification(metaclass=DummyObject):
class TFEfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTransfoXLLMHeadModel(metaclass=DummyObject):
class TFEfficientFormerModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTransfoXLMainLayer(metaclass=DummyObject):
class TFEfficientFormerPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTransfoXLModel(metaclass=DummyObject):
class TFAdaptiveEmbedding(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTransfoXLPreTrainedModel(metaclass=DummyObject):
class TFTransfoXLForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertForMaskedLM(metaclass=DummyObject):
class TFTransfoXLLMHeadModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertForMultipleChoice(metaclass=DummyObject):
class TFTransfoXLMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertForQuestionAnswering(metaclass=DummyObject):
class TFTransfoXLModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertForSequenceClassification(metaclass=DummyObject):
class TFTransfoXLPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertForTokenClassification(metaclass=DummyObject):
class TFDistilBertForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertMainLayer(metaclass=DummyObject):
class TFDistilBertForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertModel(metaclass=DummyObject):
class TFDistilBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertPreTrainedModel(metaclass=DummyObject):
class TFDistilBertForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRContextEncoder(metaclass=DummyObject):
class TFDistilBertForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRPretrainedContextEncoder(metaclass=DummyObject):
class TFDistilBertMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRPretrainedQuestionEncoder(metaclass=DummyObject):
class TFDistilBertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRPretrainedReader(metaclass=DummyObject):
class TFDistilBertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRQuestionEncoder(metaclass=DummyObject):
class TFDPRContextEncoder(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRReader(metaclass=DummyObject):
class TFDPRPretrainedContextEncoder(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFEfficientFormerForImageClassification(metaclass=DummyObject):
class TFDPRPretrainedQuestionEncoder(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFEfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject):
class TFDPRPretrainedReader(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFEfficientFormerModel(metaclass=DummyObject):
class TFDPRQuestionEncoder(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFEfficientFormerPreTrainedModel(metaclass=DummyObject):
class TFDPRReader(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
......
......@@ -121,6 +121,13 @@ class DebertaV2TokenizerFast(metaclass=DummyObject):
requires_backends(self, ["tokenizers"])
class RealmTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class RetriBertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
......@@ -352,13 +359,6 @@ class Qwen2TokenizerFast(metaclass=DummyObject):
requires_backends(self, ["tokenizers"])
class RealmTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class ReformerTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
......
......@@ -142,49 +142,63 @@ class DetaImageProcessor(metaclass=DummyObject):
requires_backends(self, ["vision"])
class DetrFeatureExtractor(metaclass=DummyObject):
class EfficientFormerImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DetrImageProcessor(metaclass=DummyObject):
class TvltImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DonutFeatureExtractor(metaclass=DummyObject):
class ViTHybridImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DonutImageProcessor(metaclass=DummyObject):
class DetrFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DPTFeatureExtractor(metaclass=DummyObject):
class DetrImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DPTImageProcessor(metaclass=DummyObject):
class DonutFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class EfficientFormerImageProcessor(metaclass=DummyObject):
class DonutImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DPTFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DPTImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
......@@ -520,13 +534,6 @@ class Swin2SRImageProcessor(metaclass=DummyObject):
requires_backends(self, ["vision"])
class TvltImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class TvpImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
......@@ -590,13 +597,6 @@ class ViTImageProcessor(metaclass=DummyObject):
requires_backends(self, ["vision"])
class ViTHybridImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class VitMatteImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
......
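The utils/dummy_*_objects.py hunks above only move entries around so that the placeholders for deprecated models sit together; the pattern itself is unchanged. For context, a minimal sketch of what one of these placeholders does, assuming the real DummyObject and requires_backends helpers exposed by transformers.utils:

from transformers.utils import DummyObject, requires_backends


# Stand-in exported when the optional backend is not installed; instantiating it
# raises an error telling the user which extra to install (here, the vision extra).
class TvltImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])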
This diff is collapsed.
This diff is collapsed.
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import ViTImageProcessor
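# Builds the processor kwargs and dummy image inputs shared by the tests below.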
class EfficientFormerImageProcessorTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=13,
num_channels=3,
image_size=224,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
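# Runs the shared ImageProcessingTestMixin checks; ViTImageProcessor is exercised here,
# as the dedicated EfficientFormer image processor is deprecated by this PR.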
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = ViTImageProcessor if is_vision_available() else None
def setUp(self):
self.image_processor_tester = EfficientFormerImageProcessorTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_proc_properties(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "image_mean"))
self.assertTrue(hasattr(image_processor, "image_std"))
self.assertTrue(hasattr(image_processor, "do_normalize"))
self.assertTrue(hasattr(image_processor, "do_resize"))
self.assertTrue(hasattr(image_processor, "size"))