Unverified commit a564d10a, authored by amyeroberts, committed by GitHub

Deprecate low use models (#30781)

* Deprecate models
- graphormer
- time_series_transformer
- xlm_prophetnet
- qdqbert
- nat
- ernie_m
- tvlt
- nezha
- mega
- jukebox
- vit_hybrid
- x_clip
- deta
- speech_to_text_2
- efficientformer
- realm
- gptsan_japanese

* Fix up

* Fix speech2text2 imports

* Make sure message isn't indented

* Fix docstrings

* Correctly map for deprecated models from model_type

* Uncomment out

* Add back time series transformer and x-clip

* Import fix and fix-up

* Fix up with updated ruff
parent 7f08817b
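
Every hunk below follows the same mechanical pattern: each deprecated model package moves one level deeper in the source tree (into a `deprecated/` subpackage), so each of its relative imports needs one extra leading dot to reach the same top-level modules. Here is a minimal, self-contained sketch of that rule; the names `pkg`, `foo`, and `GREETING` are hypothetical stand-ins that mirror, but are not, the actual transformers layout:

```python
# Demo: moving a module one package deeper means its relative imports
# need one more leading dot to reach the same top-level modules.
import os
import sys
import tempfile

root = tempfile.mkdtemp()

def write(rel_path, source=""):
    # Create a file (and any missing parent directories) under the temp root.
    path = os.path.join(root, rel_path)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "w") as f:
        f.write(source)

write("pkg/__init__.py")
write("pkg/utils.py", "GREETING = 'reached pkg.utils'\n")
write("pkg/models/__init__.py")
# Original location: three dots climb foo -> models -> pkg.
write("pkg/models/foo/__init__.py", "from ...utils import GREETING\n")
write("pkg/models/deprecated/__init__.py")
# Moved one package deeper: four dots are now needed to reach pkg.
write("pkg/models/deprecated/foo/__init__.py", "from ....utils import GREETING\n")

sys.path.insert(0, root)
import pkg.models.foo
import pkg.models.deprecated.foo

print(pkg.models.foo.GREETING)             # reached pkg.utils
print(pkg.models.deprecated.foo.GREETING)  # reached pkg.utils
```

Keeping the imports relative makes the deprecation a pure file move plus this one-dot adjustment, which is exactly what the diffs below show.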
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
 
 _import_structure = {"configuration_qdqbert": ["QDQBertConfig"]}
...
@@ -14,8 +14,8 @@
 # limitations under the License.
 """QDQBERT model configuration"""
 
-from ...configuration_utils import PretrainedConfig
-from ...utils import logging
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
 
 
 logger = logging.get_logger(__name__)
...
@@ -25,8 +25,8 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
-from ...activations import ACT2FN
-from ...modeling_outputs import (
+from ....activations import ACT2FN
+from ....modeling_outputs import (
     BaseModelOutputWithPastAndCrossAttentions,
     BaseModelOutputWithPoolingAndCrossAttentions,
     CausalLMOutputWithCrossAttentions,
@@ -37,9 +37,9 @@ from ...modeling_outputs import (
     SequenceClassifierOutput,
     TokenClassifierOutput,
 )
-from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
-from ...utils import (
+from ....modeling_utils import PreTrainedModel
+from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ....utils import (
     add_code_sample_docstrings,
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
...
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
 
 _import_structure = {
...
@@ -14,8 +14,8 @@
 # limitations under the License.
 """REALM model configuration."""
 
-from ...configuration_utils import PretrainedConfig
-from ...utils import logging
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
 
 
 logger = logging.get_logger(__name__)
...
@@ -23,16 +23,16 @@ import torch
 from torch import nn
 from torch.nn import CrossEntropyLoss
 
-from ...activations import ACT2FN
-from ...modeling_outputs import (
+from ....activations import ACT2FN
+from ....modeling_outputs import (
     BaseModelOutputWithPastAndCrossAttentions,
     BaseModelOutputWithPoolingAndCrossAttentions,
     MaskedLMOutput,
     ModelOutput,
 )
-from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
-from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from ....modeling_utils import PreTrainedModel
+from ....pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
 from .configuration_realm import RealmConfig
...
@@ -20,8 +20,8 @@ from typing import Optional, Union
 import numpy as np
 from huggingface_hub import hf_hub_download
 
-from ... import AutoTokenizer
-from ...utils import logging
+from .... import AutoTokenizer
+from ....utils import logging
 
 _REALM_BLOCK_RECORDS_FILENAME = "block_records.npy"
...
@@ -19,9 +19,9 @@ import os
 import unicodedata
 from typing import List, Optional, Tuple
 
-from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
-from ...tokenization_utils_base import BatchEncoding
-from ...utils import PaddingStrategy, logging
+from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ....tokenization_utils_base import BatchEncoding
+from ....utils import PaddingStrategy, logging
 
 
 logger = logging.get_logger(__name__)
...
@@ -19,9 +19,9 @@ from typing import List, Optional, Tuple
 from tokenizers import normalizers
 
-from ...tokenization_utils_base import BatchEncoding
-from ...tokenization_utils_fast import PreTrainedTokenizerFast
-from ...utils import PaddingStrategy, logging
+from ....tokenization_utils_base import BatchEncoding
+from ....tokenization_utils_fast import PreTrainedTokenizerFast
+from ....utils import PaddingStrategy, logging
 from .tokenization_realm import RealmTokenizer
...
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from typing import TYPE_CHECKING
 
-from ...utils import (
+from ....utils import (
     OptionalDependencyNotAvailable,
     _LazyModule,
     is_sentencepiece_available,
...
@@ -14,8 +14,8 @@
 # limitations under the License.
 """Speech2Text model configuration"""
 
-from ...configuration_utils import PretrainedConfig
-from ...utils import logging
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
 
 
 logger = logging.get_logger(__name__)
...
@@ -22,11 +22,11 @@ import torch
 from torch import nn
 from torch.nn import CrossEntropyLoss
 
-from ...activations import ACT2FN
-from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
-from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
-from ...modeling_utils import PreTrainedModel
-from ...utils import add_start_docstrings, logging, replace_return_docstrings
+from ....activations import ACT2FN
+from ....modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
+from ....modeling_utils import PreTrainedModel
+from ....utils import add_start_docstrings, logging, replace_return_docstrings
 from .configuration_speech_to_text_2 import Speech2Text2Config
...
@@ -19,7 +19,7 @@ Speech processor class for Speech2Text2
 import warnings
 from contextlib import contextmanager
 
-from ...processing_utils import ProcessorMixin
+from ....processing_utils import ProcessorMixin
 
 
 class Speech2Text2Processor(ProcessorMixin):
...
@@ -18,8 +18,8 @@ import json
 import os
 from typing import Dict, List, Optional, Tuple
 
-from ...tokenization_utils import PreTrainedTokenizer
-from ...utils import logging
+from ....tokenization_utils import PreTrainedTokenizer
+from ....utils import logging
 
 
 logger = logging.get_logger(__name__)
...
@@ -17,7 +17,7 @@
 # limitations under the License.
 
 from typing import TYPE_CHECKING
 
-from ...utils import (
+from ....utils import (
     OptionalDependencyNotAvailable,
     _LazyModule,
     is_torch_available,
...
@@ -14,8 +14,8 @@
 # limitations under the License.
 """TVLT model configuration"""
 
-from ...configuration_utils import PretrainedConfig
-from ...utils import logging
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
 
 
 logger = logging.get_logger(__name__)
...
@@ -19,9 +19,9 @@ from typing import List, Optional, Union
 import numpy as np
 
-from ...audio_utils import mel_filter_bank, spectrogram, window_function
-from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
-from ...utils import TensorType, logging
+from ....audio_utils import mel_filter_bank, spectrogram, window_function
+from ....feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
+from ....utils import TensorType, logging
 
 
 logger = logging.get_logger(__name__)
...
@@ -18,13 +18,13 @@ from typing import Dict, List, Optional, Union
 import numpy as np
 
-from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
-from ...image_transforms import (
+from ....image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ....image_transforms import (
     get_resize_output_image_size,
     resize,
     to_channel_dimension_format,
 )
-from ...image_utils import (
+from ....image_utils import (
     IMAGENET_STANDARD_MEAN,
     IMAGENET_STANDARD_STD,
     ChannelDimension,
@@ -38,7 +38,7 @@ from ...image_utils import (
     validate_kwargs,
     validate_preprocess_arguments,
 )
-from ...utils import TensorType, logging
+from ....utils import TensorType, logging
 
 
 logger = logging.get_logger(__name__)
...
@@ -25,11 +25,11 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
-from ...activations import ACT2FN
-from ...modeling_outputs import BaseModelOutput, SequenceClassifierOutput
-from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
-from ...utils import (
+from ....activations import ACT2FN
+from ....modeling_outputs import BaseModelOutput, SequenceClassifierOutput
+from ....modeling_utils import PreTrainedModel
+from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ....utils import (
     ModelOutput,
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
...
@@ -16,7 +16,7 @@
 Processor class for TVLT.
 """
 
-from ...processing_utils import ProcessorMixin
+from ....processing_utils import ProcessorMixin
 
 
 class TvltProcessor(ProcessorMixin):
...
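
Deprecation here is a relocation, not a removal: the classes are meant to stay importable from the top-level namespace during the deprecation period. A hedged usage sketch, assuming the public import path is unchanged by this commit (the diffs above suggest this but do not show it):

```python
# Assumption: top-level imports still resolve after the move into the
# deprecated/ subpackage; only the internal source location changed.
from transformers import QDQBertConfig

config = QDQBertConfig()
print(config.model_type)  # "qdqbert"
```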