Unverified commit 633e5e89 authored by Arthur, committed by GitHub

[Refactor] Relative imports wherever we can (#21880)

* initial commit

* update

* second batch

* style

* fix imports

* fix relative import on pipeline
parent 43299c63
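
Every hunk below applies the same mechanical change: modules that live inside the package (for example under src/transformers/models/<model>/) stop importing through the absolute `transformers` namespace and use relative imports instead. For a module three package levels deep, `...` resolves to the top-level `transformers` package, so `from ...utils import logging` is the relative spelling of `from transformers.utils import logging`, and `from ... import AutoBackbone` pulls from the package's top-level `__init__`. A minimal sketch of the before/after pattern, using a hypothetical module rather than one of the files touched in this commit:

# src/transformers/models/example/modeling_example.py (hypothetical module, shown only to illustrate the rewrite)

# Before: absolute imports that go back through the top-level `transformers` name
from transformers import AutoBackbone
from transformers.utils import logging

# After: relative imports; "." is models.example, ".." is models, "..." is transformers
from ... import AutoBackbone        # top-level package (transformers/__init__.py)
from ...utils import logging        # transformers.utils

logger = logging.get_logger(__name__)
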
@@ -14,9 +14,8 @@
 # limitations under the License.
 """ MarkupLM model configuration"""
-from transformers.utils import logging
 from ...configuration_utils import PretrainedConfig
+from ...utils import logging
 logger = logging.get_logger(__name__)
...
@@ -23,13 +23,13 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-from transformers.activations import ACT2FN
-from transformers.file_utils import (
+from ...activations import ACT2FN
+from ...file_utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     replace_return_docstrings,
 )
-from transformers.modeling_outputs import (
+from ...modeling_outputs import (
     BaseModelOutputWithPastAndCrossAttentions,
     BaseModelOutputWithPoolingAndCrossAttentions,
     MaskedLMOutput,
@@ -37,14 +37,13 @@ from transformers.modeling_outputs import (
     SequenceClassifierOutput,
     TokenClassifierOutput,
 )
-from transformers.modeling_utils import (
+from ...modeling_utils import (
     PreTrainedModel,
     apply_chunking_to_forward,
     find_pruneable_heads_and_indices,
     prune_linear_layer,
 )
-from transformers.utils import logging
+from ...utils import logging
 from .configuration_markuplm import MarkupLMConfig
...
@@ -20,8 +20,8 @@ from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
 import numpy as np
-from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
-from transformers.image_transforms import (
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
     PaddingMode,
     get_resize_output_image_size,
     normalize,
@@ -31,7 +31,7 @@ from transformers.image_transforms import (
     to_channel_dimension_format,
     to_numpy_array,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     ChannelDimension,
     ImageInput,
     PILImageResampling,
@@ -40,7 +40,7 @@ from transformers.image_utils import (
     is_batched,
     valid_images,
 )
-from transformers.utils import (
+from ...utils import (
     IMAGENET_DEFAULT_MEAN,
     IMAGENET_DEFAULT_STD,
     TensorType,
...
@@ -24,9 +24,7 @@ import numpy as np
 import torch
 from torch import Tensor, nn
-from transformers import AutoBackbone, SwinConfig
-from transformers.utils import logging
+from ... import AutoBackbone, SwinConfig
 from ...activations import ACT2FN
 from ...file_utils import (
     ModelOutput,
@@ -38,6 +36,7 @@ from ...file_utils import (
 )
 from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions
 from ...modeling_utils import PreTrainedModel
+from ...utils import logging
 from .configuration_mask2former import Mask2FormerConfig
...
@@ -16,8 +16,7 @@
 import warnings
-from transformers.utils import logging
+from ...utils import logging
 from .image_processing_maskformer import MaskFormerImageProcessor
...
@@ -20,8 +20,8 @@ from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tupl
 import numpy as np
-from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
-from transformers.image_transforms import (
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
     PaddingMode,
     get_resize_output_image_size,
     normalize,
@@ -31,7 +31,7 @@ from transformers.image_transforms import (
     to_channel_dimension_format,
     to_numpy_array,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     ChannelDimension,
     ImageInput,
     PILImageResampling,
@@ -40,7 +40,7 @@ from transformers.image_utils import (
     make_list_of_images,
     valid_images,
 )
-from transformers.utils import (
+from ...utils import (
     IMAGENET_DEFAULT_MEAN,
     IMAGENET_DEFAULT_STD,
     TensorType,
...
@@ -24,9 +24,7 @@ import numpy as np
 import torch
 from torch import Tensor, nn
-from transformers import AutoBackbone
-from transformers.utils import logging
+from ... import AutoBackbone
 from ...activations import ACT2FN
 from ...modeling_outputs import BaseModelOutputWithCrossAttentions
 from ...modeling_utils import PreTrainedModel
@@ -35,6 +33,7 @@ from ...utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     is_scipy_available,
+    logging,
     replace_return_docstrings,
     requires_backends,
 )
...
@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union
 import numpy as np
-from transformers.utils.generic import TensorType
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -39,7 +37,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, logging
 logger = logging.get_logger(__name__)
...
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Tuple, Union
 import numpy as np
-from transformers.utils import is_torch_available, is_torch_tensor
-from transformers.utils.generic import TensorType
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -40,7 +37,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
 if is_torch_available():
...
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Tuple, Union
 import numpy as np
-from transformers.utils import is_torch_available, is_torch_tensor, is_vision_available
-from transformers.utils.generic import TensorType
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import center_crop, get_resize_output_image_size, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -32,7 +29,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
 if is_vision_available():
...
-from transformers import PretrainedConfig
+from ... import PretrainedConfig
 NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
...
@@ -21,8 +21,8 @@ from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
 import numpy as np
 from huggingface_hub import hf_hub_download
-from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
-from transformers.image_transforms import (
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
     PaddingMode,
     get_resize_output_image_size,
     normalize,
@@ -32,7 +32,7 @@ from transformers.image_transforms import (
     to_channel_dimension_format,
     to_numpy_array,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     ChannelDimension,
     ImageInput,
     PILImageResampling,
@@ -41,7 +41,7 @@ from transformers.image_utils import (
     make_list_of_images,
     valid_images,
 )
-from transformers.utils import (
+from ...utils import (
     IMAGENET_DEFAULT_MEAN,
     IMAGENET_DEFAULT_STD,
     TensorType,
...
@@ -24,9 +24,7 @@ import torch
 from torch import Tensor, nn
 from torch.cuda.amp import autocast
-from transformers import AutoBackbone
-from transformers.utils import logging
+from ... import AutoBackbone
 from ...activations import ACT2FN
 from ...modeling_outputs import BaseModelOutput
 from ...modeling_utils import PreTrainedModel
@@ -35,6 +33,7 @@ from ...utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     is_scipy_available,
+    logging,
     replace_return_docstrings,
     requires_backends,
 )
...
@@ -18,9 +18,8 @@ Image/Text processor class for OneFormer
 from typing import List
-from transformers.utils import is_torch_available
 from ...processing_utils import ProcessorMixin
+from ...utils import is_torch_available
 if is_torch_available():
...
@@ -19,8 +19,8 @@ from typing import Dict, List, Optional, Tuple, Union
 import numpy as np
-from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
-from transformers.image_transforms import (
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
     center_crop,
     center_to_corners_format,
     normalize,
@@ -29,7 +29,7 @@ from transformers.image_transforms import (
     to_channel_dimension_format,
     to_numpy_array,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     OPENAI_CLIP_MEAN,
     OPENAI_CLIP_STD,
     ChannelDimension,
@@ -38,7 +38,7 @@ from transformers.image_utils import (
     make_list_of_images,
     valid_images,
 )
-from transformers.utils import TensorType, is_torch_available, logging
+from ...utils import TensorType, is_torch_available, logging
 if is_torch_available():
...
@@ -21,10 +21,9 @@ from typing import List
 import numpy as np
-from transformers import is_flax_available, is_tf_available, is_torch_available
 from ...processing_utils import ProcessorMixin
 from ...tokenization_utils_base import BatchEncoding
+from ...utils import is_flax_available, is_tf_available, is_torch_available
 class OwlViTProcessor(ProcessorMixin):
...
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union
 import numpy as np
-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -34,7 +31,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging
 if is_vision_available():
...
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union
 import numpy as np
-from transformers import is_vision_available
-from transformers.utils.generic import TensorType
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -40,7 +37,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging
 if is_vision_available():
...
@@ -20,8 +20,7 @@ from typing import Optional, Union
 import numpy as np
 from huggingface_hub import hf_hub_download
-from transformers import AutoTokenizer
+from ... import AutoTokenizer
 from ...utils import logging
...
@@ -19,9 +19,6 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 import numpy as np
-from transformers.utils import is_torch_available, is_torch_tensor, is_vision_available
-from transformers.utils.generic import TensorType
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -34,7 +31,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
 if is_vision_available():
...