Commit a564d10a authored by amyeroberts, committed by GitHub

Deprecate low-use models (#30781)

* Deprecate models
- graphormer
- time_series_transformer
- xlm_prophetnet
- qdqbert
- nat
- ernie_m
- tvlt
- nezha
- mega
- jukebox
- vit_hybrid
- x_clip
- deta
- speech_to_text_2
- efficientformer
- realm
- gptsan_japanese

* Fix up

* Fix speech2text2 imports

* Make sure message isn't indented

* Fix docstrings

* Correctly map model_type to module name for deprecated models

* Uncomment

* Add back time series transformer and x-clip

* Import fix and fix-up

* Fix up with updated ruff
parent 7f08817b
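The "correctly map model_type" fix-up above concerns the auto classes: once a model's code lives under models/deprecated/, the module name derived from its model_type string needs a deprecated. prefix, or AutoConfig/AutoModel lookups would point at the old location. A minimal sketch of that mapping logic, assuming a DEPRECATED_MODELS list of model_type strings like the one this PR maintains alongside the auto mappings (the list excerpt below is illustrative, not the full set):

# Sketch: route deprecated model types to the models/deprecated/ subpackage.
# DEPRECATED_MODELS is assumed to be a plain list of model_type strings.
DEPRECATED_MODELS = ["graphormer", "jukebox", "mega", "nat", "nezha"]  # excerpt

def model_type_to_module_name(key: str) -> str:
    """Convert a model_type such as "nat" to the module name it lives under."""
    key = key.replace("-", "_")
    if key in DEPRECATED_MODELS:
        # transformers.models.deprecated.nat instead of transformers.models.nat
        key = f"deprecated.{key}"
    return key

assert model_type_to_module_name("nat") == "deprecated.nat"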
@@ -13,7 +13,7 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
 _import_structure = {
--- a/src/transformers/models/graphormer/collating_graphormer.py
+++ b/src/transformers/models/deprecated/graphormer/collating_graphormer.py
@@ -6,7 +6,7 @@ from typing import Any, Dict, List, Mapping
 import numpy as np
 import torch
-from ...utils import is_cython_available, requires_backends
+from ....utils import is_cython_available, requires_backends
 if is_cython_available():
--- a/src/transformers/models/graphormer/configuration_graphormer.py
+++ b/src/transformers/models/deprecated/graphormer/configuration_graphormer.py
@@ -14,8 +14,8 @@
 # limitations under the License.
 """Graphormer model configuration"""
-from ...configuration_utils import PretrainedConfig
-from ...utils import logging
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
 logger = logging.get_logger(__name__)
--- a/src/transformers/models/graphormer/modeling_graphormer.py
+++ b/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
@@ -21,13 +21,13 @@ import torch
 import torch.nn as nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-from ...activations import ACT2FN
-from ...modeling_outputs import (
+from ....activations import ACT2FN
+from ....modeling_outputs import (
     BaseModelOutputWithNoAttention,
     SequenceClassifierOutput,
 )
-from ...modeling_utils import PreTrainedModel
-from ...utils import logging
+from ....modeling_utils import PreTrainedModel
+from ....utils import logging
 from .configuration_graphormer import GraphormerConfig
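Every hunk in this diff is the same mechanical change: moving a module from src/transformers/models/<name>/ to src/transformers/models/deprecated/<name>/ places it one package level deeper, so every relative import that climbs out of the model's own directory needs one more leading dot, while same-directory imports are untouched. A comment-only illustration using the graphormer file just shown:

# Before the move, in src/transformers/models/graphormer/modeling_graphormer.py:
#   from ...utils import logging
#   (3 dots: graphormer -> models -> transformers)
#
# After the move, in src/transformers/models/deprecated/graphormer/modeling_graphormer.py:
#   from ....utils import logging
#   (4 dots: graphormer -> deprecated -> models -> transformers)
#
# Same-directory imports keep a single dot in both layouts:
#   from .configuration_graphormer import GraphormerConfig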
@@ -14,7 +14,7 @@
 from typing import TYPE_CHECKING
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
 _import_structure = {
--- a/src/transformers/models/jukebox/configuration_jukebox.py
+++ b/src/transformers/models/deprecated/jukebox/configuration_jukebox.py
@@ -17,8 +17,8 @@
 import os
 from typing import List, Union
-from ...configuration_utils import PretrainedConfig
-from ...utils import logging
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
 logger = logging.get_logger(__name__)
--- a/src/transformers/models/jukebox/modeling_jukebox.py
+++ b/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
@@ -24,10 +24,10 @@ import torch.nn.functional as F
 from torch import nn
 from torch.nn import LayerNorm as FusedLayerNorm
-from ...activations import ACT2FN
-from ...modeling_utils import PreTrainedModel
-from ...utils import add_start_docstrings, logging
-from ...utils.logging import tqdm
+from ....activations import ACT2FN
+from ....modeling_utils import PreTrainedModel
+from ....utils import add_start_docstrings, logging
+from ....utils.logging import tqdm
 from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig
--- a/src/transformers/models/jukebox/tokenization_jukebox.py
+++ b/src/transformers/models/deprecated/jukebox/tokenization_jukebox.py
@@ -24,10 +24,10 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 import numpy as np
 import regex
-from ...tokenization_utils import AddedToken, PreTrainedTokenizer
-from ...tokenization_utils_base import BatchEncoding
-from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
-from ...utils.generic import _is_jax, _is_numpy
+from ....tokenization_utils import AddedToken, PreTrainedTokenizer
+from ....tokenization_utils_base import BatchEncoding
+from ....utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
+from ....utils.generic import _is_jax, _is_numpy
 logger = logging.get_logger(__name__)
@@ -14,7 +14,7 @@
 from typing import TYPE_CHECKING
-from ...utils import (
+from ....utils import (
     OptionalDependencyNotAvailable,
     _LazyModule,
     is_torch_available,
--- a/src/transformers/models/mega/configuration_mega.py
+++ b/src/transformers/models/deprecated/mega/configuration_mega.py
@@ -17,9 +17,9 @@
 from collections import OrderedDict
 from typing import Mapping
-from ...configuration_utils import PretrainedConfig
-from ...onnx import OnnxConfig
-from ...utils import logging
+from ....configuration_utils import PretrainedConfig
+from ....onnx import OnnxConfig
+from ....utils import logging
 logger = logging.get_logger(__name__)
--- a/src/transformers/models/mega/modeling_mega.py
+++ b/src/transformers/models/deprecated/mega/modeling_mega.py
@@ -23,8 +23,8 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-from ...activations import ACT2FN
-from ...modeling_outputs import (
+from ....activations import ACT2FN
+from ....modeling_outputs import (
     BaseModelOutputWithPoolingAndCrossAttentions,
     CausalLMOutputWithCrossAttentions,
     MaskedLMOutput,
@@ -33,9 +33,9 @@ from ...modeling_outputs import (
     SequenceClassifierOutput,
     TokenClassifierOutput,
 )
-from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import ALL_LAYERNORM_LAYERS
-from ...utils import (
+from ....modeling_utils import PreTrainedModel
+from ....pytorch_utils import ALL_LAYERNORM_LAYERS
+from ....utils import (
     add_code_sample_docstrings,
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
--- a/src/transformers/models/nat/__init__.py
+++ b/src/transformers/models/deprecated/nat/__init__.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
 _import_structure = {"configuration_nat": ["NatConfig"]}
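The __init__.py hunks all touch the lazy-import boilerplate shared by every transformers model package: _import_structure maps each submodule to the public names it provides, and _LazyModule defers the actual imports until first attribute access. A condensed sketch of the pattern for the deprecated nat package above; the real file also wraps the torch-only entries in OptionalDependencyNotAvailable guards and a TYPE_CHECKING branch:

# Condensed sketch of src/transformers/models/deprecated/nat/__init__.py.
import sys

# Four dots: climb nat -> deprecated -> models to reach transformers.utils.
from ....utils import _LazyModule

# Submodule name -> public names it exports; nothing is imported until used.
_import_structure = {"configuration_nat": ["NatConfig"]}

sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)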
--- a/src/transformers/models/nat/configuration_nat.py
+++ b/src/transformers/models/deprecated/nat/configuration_nat.py
@@ -14,9 +14,9 @@
 # limitations under the License.
 """Neighborhood Attention Transformer model configuration"""
-from ...configuration_utils import PretrainedConfig
-from ...utils import logging
-from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+from ....utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
 logger = logging.get_logger(__name__)
--- a/src/transformers/models/nat/modeling_nat.py
+++ b/src/transformers/models/deprecated/nat/modeling_nat.py
@@ -23,11 +23,11 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-from ...activations import ACT2FN
-from ...modeling_outputs import BackboneOutput
-from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
-from ...utils import (
+from ....activations import ACT2FN
+from ....modeling_outputs import BackboneOutput
+from ....modeling_utils import PreTrainedModel
+from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ....utils import (
     ModelOutput,
     OptionalDependencyNotAvailable,
     add_code_sample_docstrings,
@@ -38,7 +38,7 @@ from ...utils import (
     replace_return_docstrings,
     requires_backends,
 )
-from ...utils.backbone_utils import BackboneMixin
+from ....utils.backbone_utils import BackboneMixin
 from .configuration_nat import NatConfig
@@ -13,7 +13,7 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
 _import_structure = {
--- a/src/transformers/models/nezha/configuration_nezha.py
+++ b/src/transformers/models/deprecated/nezha/configuration_nezha.py
-from ... import PretrainedConfig
+from .... import PretrainedConfig
 class NezhaConfig(PretrainedConfig):
--- a/src/transformers/models/nezha/modeling_nezha.py
+++ b/src/transformers/models/deprecated/nezha/modeling_nezha.py
@@ -25,8 +25,8 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-from ...activations import ACT2FN
-from ...modeling_outputs import (
+from ....activations import ACT2FN
+from ....modeling_outputs import (
     BaseModelOutputWithPastAndCrossAttentions,
     BaseModelOutputWithPoolingAndCrossAttentions,
     MaskedLMOutput,
@@ -36,9 +36,9 @@ from ...modeling_outputs import (
     SequenceClassifierOutput,
     TokenClassifierOutput,
 )
-from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
-from ...utils import (
+from ....modeling_utils import PreTrainedModel
+from ....pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ....utils import (
     ModelOutput,
     add_code_sample_docstrings,
     add_start_docstrings,
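For users, the visible effect of the move is the fully qualified module path. A hedged usage sketch, assuming the top-level exports are kept intact (which the commit message's import fix-ups suggest) and that only the module paths change:

# Top-level imports keep working after this commit; deprecation here means the
# code is frozen pending removal, not yet removed.
from transformers import GraphormerConfig

# The fully qualified path now includes the "deprecated" segment (this import
# intentionally rebinds the same name to demonstrate the new location):
from transformers.models.deprecated.graphormer.configuration_graphormer import GraphormerConfig

# The old fully qualified path would now raise ModuleNotFoundError:
# from transformers.models.graphormer.configuration_graphormer import GraphormerConfig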