Unverified Commit 44fe1a1c authored by Yih-Dar, committed by GitHub

Avoid using unnecessary `get_values(MODEL_MAPPING)` (#29362)



* more fixes

* more fixes

---------
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent b647acdb
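
The recurring change in every file below replaces membership checks against `get_values(MODEL_MAPPING)` with checks against `MODEL_MAPPING_NAMES.values()`. `get_values` flattens the lazy auto-mapping into actual model classes, which resolves and imports every mapped class just to run a skip check; `MODEL_MAPPING_NAMES` is a plain mapping of model-type strings to class-name strings, so comparing `model_class.__name__` against its values needs no class resolution. A minimal sketch of the two patterns; the `should_skip` helper is hypothetical, for illustration only:

# Hypothetical helper contrasting the old and new membership checks.
# MODEL_MAPPING_NAMES maps model types to class-name strings
# (e.g. "beit" -> "BeitModel"), so no model classes get imported.
from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES


def should_skip(model_class) -> bool:
    # Old pattern (materializes every class registered in the mapping):
    #     from transformers import MODEL_MAPPING
    #     from transformers.models.auto import get_values
    #     return model_class in get_values(MODEL_MAPPING)
    # New pattern (pure string comparison, no imports triggered):
    return model_class.__name__ in MODEL_MAPPING_NAMES.values()
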
@@ -21,7 +21,6 @@ from datasets import load_dataset
 from packaging import version
 
 from transformers import BeitConfig
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available
@@ -36,14 +35,13 @@ if is_torch_available():
     from torch import nn
 
     from transformers import (
-        MODEL_FOR_BACKBONE_MAPPING,
-        MODEL_MAPPING,
         BeitBackbone,
         BeitForImageClassification,
         BeitForMaskedImageModeling,
         BeitForSemanticSegmentation,
         BeitModel,
     )
+    from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
     from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -312,10 +310,10 @@ class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             # we don't test BeitForMaskedImageModeling
-            if model_class in [
-                *get_values(MODEL_MAPPING),
-                *get_values(MODEL_FOR_BACKBONE_MAPPING),
-                BeitForMaskedImageModeling,
+            if model_class.__name__ in [
+                *MODEL_MAPPING_NAMES.values(),
+                *MODEL_FOR_BACKBONE_MAPPING_NAMES.values(),
+                "BeitForMaskedImageModeling",
             ]:
                 continue
@@ -337,8 +335,12 @@ class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             # we don't test BeitForMaskedImageModeling
             if (
-                model_class
-                in [*get_values(MODEL_MAPPING), *get_values(MODEL_FOR_BACKBONE_MAPPING), BeitForMaskedImageModeling]
+                model_class.__name__
+                in [
+                    *MODEL_MAPPING_NAMES.values(),
+                    *MODEL_FOR_BACKBONE_MAPPING_NAMES.values(),
+                    "BeitForMaskedImageModeling",
+                ]
                 or not model_class.supports_gradient_checkpointing
             ):
                 continue

@@ -24,8 +24,7 @@ import numpy as np
 import requests
 
 import transformers
-from transformers import MODEL_MAPPING, CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig
-from transformers.models.auto import get_values
+from transformers import CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig
 from transformers.testing_utils import (
     is_flax_available,
     is_pt_flax_cross_test,
@@ -52,6 +51,7 @@ if is_torch_available():
     from torch import nn
 
     from transformers import CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegTextModel, CLIPSegVisionModel
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.clipseg.modeling_clipseg import CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -751,7 +751,7 @@ class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             print("Model class:", model_class)

@@ -18,7 +18,6 @@
 import unittest
 
 from transformers import Data2VecVisionConfig
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available
@@ -32,11 +31,11 @@ if is_torch_available():
     from torch import nn
 
     from transformers import (
-        MODEL_MAPPING,
         Data2VecVisionForImageClassification,
         Data2VecVisionForSemanticSegmentation,
         Data2VecVisionModel,
     )
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.data2vec.modeling_data2vec_vision import DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -235,7 +234,7 @@ class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in [*get_values(MODEL_MAPPING)]:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             model = model_class(config)
@@ -254,7 +253,7 @@ class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in [*get_values(MODEL_MAPPING)] or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
             # TODO: remove the following 3 lines once we have a MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
             # this can then be incorporated into _prepare_for_class in test_modeling_common.py

@@ -19,7 +19,6 @@ import unittest
 import warnings
 
 from transformers import DeiTConfig
-from transformers.models.auto import get_values
 from transformers.testing_utils import (
     require_accelerate,
     require_torch,
@@ -41,14 +40,16 @@ if is_torch_available():
     from torch import nn
 
     from transformers import (
-        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
-        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
-        MODEL_MAPPING,
         DeiTForImageClassification,
         DeiTForImageClassificationWithTeacher,
        DeiTForMaskedImageModeling,
         DeiTModel,
     )
+    from transformers.models.auto.modeling_auto import (
+        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
+        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
+        MODEL_MAPPING_NAMES,
+    )
     from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -269,7 +270,7 @@ class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             # DeiTForImageClassificationWithTeacher supports inference-only
             if (
-                model_class in get_values(MODEL_MAPPING)
+                model_class.__name__ in MODEL_MAPPING_NAMES.values()
                 or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
             ):
                 continue
@@ -289,7 +290,7 @@ class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
             # DeiTForImageClassificationWithTeacher supports inference-only
             if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
@@ -325,10 +326,10 @@ class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             if (
-                model_class
+                model_class.__name__
                 not in [
-                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
-                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
+                    *MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values(),
+                    *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(),
                 ]
                 or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
             ):

@@ -19,7 +19,6 @@ import unittest
 from transformers import DPTConfig
 from transformers.file_utils import is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 
 from ...test_configuration_common import ConfigTester
@@ -31,7 +30,8 @@ if is_torch_available():
     import torch
     from torch import nn
 
-    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
+    from transformers import DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -214,7 +214,7 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             model = model_class(config)
@@ -233,7 +233,7 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             config.use_cache = False
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
 
             model = model_class(config)
             model.to(torch_device)

@@ -19,7 +19,6 @@ import unittest
 from transformers import Dinov2Config, DPTConfig
 from transformers.file_utils import is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 
 from ...test_configuration_common import ConfigTester
@@ -30,7 +29,8 @@ from ...test_pipeline_mixin import PipelineTesterMixin
 if is_torch_available():
     import torch
 
-    from transformers import MODEL_MAPPING, DPTForDepthEstimation
+    from transformers import DPTForDepthEstimation
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -166,7 +166,7 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             model = model_class(config)
@@ -185,7 +185,7 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             config.use_cache = False
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
 
             model = model_class(config)
             model.to(torch_device)

@@ -19,7 +19,6 @@ import unittest
 from transformers import DPTConfig
 from transformers.file_utils import is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 
 from ...test_configuration_common import ConfigTester
@@ -31,7 +30,8 @@ if is_torch_available():
     import torch
     from torch import nn
 
-    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
+    from transformers import DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -229,7 +229,7 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             model = model_class(config)
@@ -248,7 +248,7 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             config.use_cache = False
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
 
             model = model_class(config)
             model.to(torch_device)

@@ -20,7 +20,6 @@ import warnings
 from typing import List
 
 from transformers import EfficientFormerConfig
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available
@@ -33,12 +32,14 @@ if is_torch_available():
     import torch
 
     from transformers import (
-        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
-        MODEL_MAPPING,
         EfficientFormerForImageClassification,
         EfficientFormerForImageClassificationWithTeacher,
         EfficientFormerModel,
     )
+    from transformers.models.auto.modeling_auto import (
+        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
+        MODEL_MAPPING_NAMES,
+    )
     from transformers.models.efficientformer.modeling_efficientformer import (
         EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
@@ -308,7 +309,7 @@ class EfficientFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             # EfficientFormerForImageClassificationWithTeacher supports inference-only
             if (
-                model_class in get_values(MODEL_MAPPING)
+                model_class.__name__ in MODEL_MAPPING_NAMES.values()
                 or model_class.__name__ == "EfficientFormerForImageClassificationWithTeacher"
             ):
                 continue
@@ -330,9 +331,9 @@ class EfficientFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             if (
-                model_class
+                model_class.__name__
                 not in [
-                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
+                    *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(),
                 ]
                 or model_class.__name__ == "EfficientFormerForImageClassificationWithTeacher"
             ):

@@ -18,7 +18,6 @@
 import unittest
 
 from transformers import is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 
 from ...test_configuration_common import ConfigTester
@@ -29,7 +28,8 @@ from ...test_pipeline_mixin import PipelineTesterMixin
 if is_torch_available():
     import torch
 
-    from transformers import MODEL_MAPPING, GLPNConfig, GLPNForDepthEstimation, GLPNModel
+    from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNModel
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.glpn.modeling_glpn import GLPN_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -291,7 +291,7 @@ class GLPNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
             # TODO: remove the following 3 lines once we have a MODEL_FOR_DEPTH_ESTIMATION_MAPPING
             # this can then be incorporated into _prepare_for_class in test_modeling_common.py

@@ -21,7 +21,6 @@ from math import ceil, floor
 from transformers import LevitConfig
 from transformers.file_utils import cached_property, is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 
 from ...test_configuration_common import ConfigTester
@@ -33,12 +32,14 @@ if is_torch_available():
     import torch
 
     from transformers import (
-        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
-        MODEL_MAPPING,
         LevitForImageClassification,
         LevitForImageClassificationWithTeacher,
         LevitModel,
     )
+    from transformers.models.auto.modeling_auto import (
+        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
+        MODEL_MAPPING_NAMES,
+    )
     from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -297,7 +298,7 @@ class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             # LevitForImageClassificationWithTeacher supports inference-only
             if (
-                model_class in get_values(MODEL_MAPPING)
+                model_class.__name__ in MODEL_MAPPING_NAMES.values()
                 or model_class.__name__ == "LevitForImageClassificationWithTeacher"
             ):
                 continue
@@ -317,7 +318,7 @@ class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
             # LevitForImageClassificationWithTeacher supports inference-only
             if model_class.__name__ == "LevitForImageClassificationWithTeacher":
@@ -341,9 +342,9 @@ class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             if (
-                model_class
+                model_class.__name__
                 not in [
-                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
+                    *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(),
                 ]
                 or model_class.__name__ == "LevitForImageClassificationWithTeacher"
             ):

@@ -26,7 +26,6 @@ import numpy as np
 from datasets import load_dataset
 
 from transformers import PerceiverConfig
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
 from transformers.utils import is_torch_available, is_vision_available
@@ -40,11 +39,6 @@ if is_torch_available():
     from torch import nn
 
     from transformers import (
-        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
-        MODEL_FOR_MASKED_LM_MAPPING,
-        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
-        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
-        MODEL_MAPPING,
         PerceiverForImageClassificationConvProcessing,
         PerceiverForImageClassificationFourier,
         PerceiverForImageClassificationLearned,
@@ -55,6 +49,13 @@ if is_torch_available():
         PerceiverModel,
         PerceiverTokenizer,
     )
+    from transformers.models.auto.modeling_auto import (
+        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
+        MODEL_FOR_MASKED_LM_MAPPING_NAMES,
+        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
+        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
+        MODEL_MAPPING_NAMES,
+    )
     from transformers.models.perceiver.modeling_perceiver import PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -317,16 +318,19 @@ class PerceiverModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             inputs_dict["subsampled_output_points"] = self.model_tester.subsampling
 
         if return_labels:
-            if model_class in [
-                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
-                *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
+            if model_class.__name__ in [
+                *MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values(),
+                "PerceiverForImageClassificationLearned",
+                "PerceiverForImageClassificationFourier",
+                "PerceiverForImageClassificationConvProcessing",
+                *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(),
             ]:
                 inputs_dict["labels"] = torch.zeros(
                     self.model_tester.batch_size, dtype=torch.long, device=torch_device
                 )
-            elif model_class in [
-                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
-                *get_values(MODEL_FOR_MASKED_LM_MAPPING),
+            elif model_class.__name__ in [
+                *MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES.values(),
+                *MODEL_FOR_MASKED_LM_MAPPING_NAMES.values(),
             ]:
                 inputs_dict["labels"] = torch.zeros(
                     (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
@@ -380,10 +384,10 @@ class PerceiverModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             return
 
         for model_class in self.all_model_classes:
-            if model_class in [
-                *get_values(MODEL_MAPPING),
-                PerceiverForOpticalFlow,
-                PerceiverForMultimodalAutoencoding,
+            if model_class.__name__ in [
+                *MODEL_MAPPING_NAMES.values(),
+                "PerceiverForOpticalFlow",
+                "PerceiverForMultimodalAutoencoding",
             ]:
                 continue
@@ -727,11 +731,14 @@ class PerceiverModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             # most Perceiver models don't have a typical head like is the case with BERT
-            if model_class in [
-                PerceiverForOpticalFlow,
-                PerceiverForMultimodalAutoencoding,
-                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
-                *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
+            if model_class.__name__ in [
+                "PerceiverForOpticalFlow",
+                "PerceiverForMultimodalAutoencoding",
+                *MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values(),
+                "PerceiverForImageClassificationLearned",
+                "PerceiverForImageClassificationFourier",
+                "PerceiverForImageClassificationConvProcessing",
+                *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(),
             ]:
                 continue
@@ -753,7 +760,7 @@ class PerceiverModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         ]
 
         for model_class in self.all_model_classes:
-            if model_class not in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
+            if model_class.__name__ not in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values():
                 continue
 
             config, inputs, input_mask, _, _ = self.model_tester.prepare_config_and_inputs(model_class=model_class)

@@ -18,7 +18,6 @@
 import unittest
 
 from transformers import is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import (
     require_accelerate,
     require_torch,
@@ -36,7 +35,8 @@ from ...test_pipeline_mixin import PipelineTesterMixin
 if is_torch_available():
     import torch
 
-    from transformers import MODEL_MAPPING, PvtConfig, PvtForImageClassification, PvtImageProcessor, PvtModel
+    from transformers import PvtConfig, PvtForImageClassification, PvtImageProcessor, PvtModel
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.pvt.modeling_pvt import PVT_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -243,7 +243,7 @@ class PvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
             model = model_class(config)
             model.to(torch_device)

@@ -18,7 +18,6 @@
 import unittest
 
 from transformers import SegformerConfig, is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from ...test_configuration_common import ConfigTester
@@ -30,11 +29,11 @@ if is_torch_available():
     import torch
 
     from transformers import (
-        MODEL_MAPPING,
         SegformerForImageClassification,
         SegformerForSemanticSegmentation,
         SegformerModel,
     )
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -324,7 +323,7 @@ class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             model = model_class(config)

@@ -20,7 +20,6 @@ from datasets import load_dataset
 from packaging import version
 
 from transformers import ViltConfig, is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 from transformers.utils import cached_property
@@ -33,7 +32,6 @@ if is_torch_available():
     import torch
 
     from transformers import (
-        MODEL_MAPPING,
         ViltForImageAndTextRetrieval,
         ViltForImagesAndTextClassification,
         ViltForMaskedLM,
@@ -41,6 +39,7 @@ if is_torch_available():
         ViltForTokenClassification,
         ViltModel,
     )
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.vilt.modeling_vilt import VILT_PRETRAINED_MODEL_ARCHIVE_LIST
 
 if is_vision_available():
@@ -284,7 +283,7 @@ class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
                 config.modality_type_vocab_size = 3
 
             # ViltForImageAndTextRetrieval doesn't support training for now
-            if model_class in [*get_values(MODEL_MAPPING), ViltForImageAndTextRetrieval]:
+            if model_class.__name__ in [*MODEL_MAPPING_NAMES.values(), "ViltForImageAndTextRetrieval"]:
                 continue
 
             model = model_class(config)
@@ -307,7 +306,7 @@ class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             # ViltForImageAndTextRetrieval doesn't support training for now
             if (
-                model_class in [*get_values(MODEL_MAPPING), ViltForImageAndTextRetrieval]
+                model_class.__name__ in [*MODEL_MAPPING_NAMES.values(), "ViltForImageAndTextRetrieval"]
                 or not model_class.supports_gradient_checkpointing
             ):
                 continue