Unverified Commit 11505fa1 authored by Sylvain Gugger, committed by GitHub

Dummies multi backend (#11100)

* Replaces requires_xxx with one generic method

* Quality and update check_dummies

* Fix inits check

* Post-merge cleanup
parent 424419f5
@@ -339,9 +339,6 @@ if is_tokenizers_available():
    _import_structure["models.xlnet"].append("XLNetTokenizerFast")
    _import_structure["tokenization_utils_fast"] = ["PreTrainedTokenizerFast"]

-    if is_sentencepiece_available():
-        _import_structure["convert_slow_tokenizer"] = ["SLOW_TO_FAST_CONVERTERS", "convert_slow_tokenizer"]
-
else:
    from .utils import dummy_tokenizers_objects
@@ -349,13 +346,19 @@ else:
        name for name in dir(dummy_tokenizers_objects) if not name.startswith("_")
    ]

+if is_sentencepiece_available() and is_tokenizers_available():
+    _import_structure["convert_slow_tokenizer"] = ["SLOW_TO_FAST_CONVERTERS", "convert_slow_tokenizer"]
+else:
+    from .utils import dummy_sentencepiece_and_tokenizers_objects
+
+    _import_structure["utils.dummy_sentencepiece_and_tokenizers_objects"] = [
+        name for name in dir(dummy_sentencepiece_and_tokenizers_objects) if not name.startswith("_")
+    ]
+
# Speech-specific objects
if is_speech_available():
    _import_structure["models.speech_to_text"].append("Speech2TextFeatureExtractor")

-    if is_sentencepiece_available():
-        _import_structure["models.speech_to_text"].append("Speech2TextProcessor")
-
else:
    from .utils import dummy_speech_objects
@@ -363,6 +366,15 @@ else:
        name for name in dir(dummy_speech_objects) if not name.startswith("_")
    ]

+if is_sentencepiece_available() and is_speech_available():
+    _import_structure["models.speech_to_text"].append("Speech2TextProcessor")
+else:
+    from .utils import dummy_sentencepiece_and_speech_objects
+
+    _import_structure["utils.dummy_sentencepiece_and_speech_objects"] = [
+        name for name in dir(dummy_sentencepiece_and_speech_objects) if not name.startswith("_")
+    ]
+
# Vision-specific objects
if is_vision_available():
    _import_structure["image_utils"] = ["ImageFeatureExtractionMixin"]
@@ -1641,21 +1653,25 @@ if TYPE_CHECKING:
        from .models.xlnet import XLNetTokenizerFast
        from .tokenization_utils_fast import PreTrainedTokenizerFast

-        if is_sentencepiece_available():
-            from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer
-
    else:
        from .utils.dummy_tokenizers_objects import *

+    if is_sentencepiece_available() and is_tokenizers_available():
+        from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer
+    else:
+        from .utils.dummy_sentencepiece_and_tokenizers_objects import *
+
    if is_speech_available():
        from .models.speech_to_text import Speech2TextFeatureExtractor

-        if is_sentencepiece_available():
-            from .models.speech_to_text import Speech2TextProcessor
-
    else:
        from .utils.dummy_speech_objects import *

+    if is_speech_available() and is_sentencepiece_available():
+        from .models.speech_to_text import Speech2TextProcessor
+    else:
+        from .utils.dummy_sentencepiece_and_speech_objects import *
+
    if is_vision_available():
        from .image_utils import ImageFeatureExtractionMixin
        from .models.vit import ViTFeatureExtractor
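With these guards, an object that needs two optional backends is only bound to its real implementation when both are installed; otherwise the star-import binds a dummy from the combined module. A hedged illustration of the runtime effect, assuming an environment where the speech backend is missing:

    # Illustration only: the name still imports, but resolves to the dummy class.
    from transformers import Speech2TextProcessor

    processor = Speech2TextProcessor()
    # raises ImportError; the message concatenates the install instructions
    # for both requested backends ("sentencepiece" and "speech")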
...
@@ -24,7 +24,7 @@ from typing import Dict, List, Tuple
from tokenizers import Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece

-from .file_utils import requires_protobuf, requires_sentencepiece
+from .file_utils import requires_backends


class SentencePieceExtractor:
@@ -33,7 +33,7 @@ class SentencePieceExtractor:
    """

    def __init__(self, model: str):
-        requires_sentencepiece(self)
+        requires_backends(self, "sentencepiece")
        from sentencepiece import SentencePieceProcessor

        self.sp = SentencePieceProcessor()
@@ -298,7 +298,7 @@ class RobertaConverter(Converter):

class SpmConverter(Converter):
    def __init__(self, *args):
-        requires_protobuf(self)
+        requires_backends(self, "protobuf")
        super().__init__(*args)
...
@@ -16,7 +16,7 @@
import warnings

-from ...file_utils import is_sklearn_available, requires_sklearn
+from ...file_utils import is_sklearn_available, requires_backends


if is_sklearn_available():
@@ -34,13 +34,13 @@ DEPRECATION_WARNING = (

def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
-    requires_sklearn(simple_accuracy)
+    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
-    requires_sklearn(acc_and_f1)
+    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
@@ -52,7 +52,7 @@ def acc_and_f1(preds, labels):

def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
-    requires_sklearn(pearson_and_spearman)
+    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
@@ -64,7 +64,7 @@ def pearson_and_spearman(preds, labels):

def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
-    requires_sklearn(glue_compute_metrics)
+    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
@@ -94,7 +94,7 @@ def glue_compute_metrics(task_name, preds, labels):

def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
-    requires_sklearn(xnli_compute_metrics)
+    requires_backends(xnli_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
...
@@ -532,82 +532,32 @@ VISION_IMPORT_ERROR = """
"""


-def requires_datasets(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_datasets_available():
-        raise ImportError(DATASETS_IMPORT_ERROR.format(name))
-
-
-def requires_faiss(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_faiss_available():
-        raise ImportError(FAISS_IMPORT_ERROR.format(name))
-
-
-def requires_pytorch(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_torch_available():
-        raise ImportError(PYTORCH_IMPORT_ERROR.format(name))
-
-
-def requires_sklearn(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_sklearn_available():
-        raise ImportError(SKLEARN_IMPORT_ERROR.format(name))
-
-
-def requires_tf(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_tf_available():
-        raise ImportError(TENSORFLOW_IMPORT_ERROR.format(name))
-
-
-def requires_flax(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_flax_available():
-        raise ImportError(FLAX_IMPORT_ERROR.format(name))
-
-
-def requires_tokenizers(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_tokenizers_available():
-        raise ImportError(TOKENIZERS_IMPORT_ERROR.format(name))
-
-
-def requires_sentencepiece(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_sentencepiece_available():
-        raise ImportError(SENTENCEPIECE_IMPORT_ERROR.format(name))
-
-
-def requires_protobuf(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_protobuf_available():
-        raise ImportError(PROTOBUF_IMPORT_ERROR.format(name))
-
-
-def requires_pandas(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_pandas_available():
-        raise ImportError(PANDAS_IMPORT_ERROR.format(name))
-
-
-def requires_scatter(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_scatter_available():
-        raise ImportError(SCATTER_IMPORT_ERROR.format(name))
-
-
-def requires_speech(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_speech_available():
-        raise ImportError(SPEECH_IMPORT_ERROR.format(name))
-
-
-def requires_vision(obj):
-    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
-    if not is_vision_available():
-        raise ImportError(VISION_IMPORT_ERROR.format(name))
+BACKENDS_MAPPING = OrderedDict(
+    [
+        ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)),
+        ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)),
+        ("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
+        ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)),
+        ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)),
+        ("scatter", (is_scatter_available, SCATTER_IMPORT_ERROR)),
+        ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
+        ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)),
+        ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)),
+        ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)),
+        ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)),
+        ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
+        ("vision", (is_vision_available, VISION_IMPORT_ERROR)),
+    ]
+)
+
+
+def requires_backends(obj, backends):
+    if not isinstance(backends, (list, tuple)):
+        backends = [backends]
+
+    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
+    if not all(BACKENDS_MAPPING[backend][0]() for backend in backends):
+        raise ImportError("".join([BACKENDS_MAPPING[backend][1].format(name) for backend in backends]))


def add_start_docstrings(*docstr):
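The thirteen near-identical helpers collapse into the single table-driven check above. A self-contained sketch of the pattern, with hypothetical availability probes and error templates standing in for the real is_xxx_available() functions and XXX_IMPORT_ERROR strings:

    import importlib.util
    from collections import OrderedDict


    def _make_check(package):
        # Hypothetical probe; the real code keeps dedicated is_xxx_available() helpers.
        return lambda: importlib.util.find_spec(package) is not None


    # Illustrative error templates; the real XXX_IMPORT_ERROR strings carry full install help.
    TORCH_ERROR = "{0} requires the PyTorch library: pip install torch\n"
    SKLEARN_ERROR = "{0} requires the scikit-learn library: pip install scikit-learn\n"

    BACKENDS_MAPPING = OrderedDict(
        [
            ("torch", (_make_check("torch"), TORCH_ERROR)),
            ("sklearn", (_make_check("sklearn"), SKLEARN_ERROR)),
        ]
    )


    def requires_backends(obj, backends):
        if not isinstance(backends, (list, tuple)):
            backends = [backends]
        name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
        # One check for all requested backends; if any is missing, the single
        # ImportError concatenates the messages for every backend in the list.
        if not all(BACKENDS_MAPPING[backend][0]() for backend in backends):
            raise ImportError("".join(BACKENDS_MAPPING[backend][1].format(name) for backend in backends))

Adding a backend now means one mapping entry instead of a new function, and multi-backend requirements such as ["datasets", "faiss"] below become a single call site.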
...
@@ -21,14 +21,7 @@ from typing import Iterable, List, Optional, Tuple

import numpy as np

-from ...file_utils import (
-    cached_path,
-    is_datasets_available,
-    is_faiss_available,
-    is_remote_url,
-    requires_datasets,
-    requires_faiss,
-)
+from ...file_utils import cached_path, is_datasets_available, is_faiss_available, is_remote_url, requires_backends
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
@@ -372,8 +365,7 @@ class RagRetriever:

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True):
        self._init_retrieval = init_retrieval
-        requires_datasets(self)
-        requires_faiss(self)
+        requires_backends(self, ["datasets", "faiss"])
        super().__init__()
        self.index = index or self._build_index(config)
        self.generator_tokenizer = generator_tokenizer
@@ -411,8 +403,7 @@ class RagRetriever:

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
-        requires_datasets(cls)
-        requires_faiss(cls)
+        requires_backends(cls, ["datasets", "faiss"])
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
...
@@ -33,7 +33,7 @@ from ...file_utils import (
    add_start_docstrings_to_model_forward,
    is_scatter_available,
    replace_return_docstrings,
-    requires_scatter,
+    requires_backends,
)
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput
from ...modeling_utils import (
@@ -792,7 +792,7 @@ class TapasModel(TapasPreTrainedModel):
    """

    def __init__(self, config, add_pooling_layer=True):
-        requires_scatter(self)
+        requires_backends(self, "scatter")
        super().__init__(config)
        self.config = config
...
@@ -2,7 +2,7 @@ import collections

import numpy as np

-from ..file_utils import add_end_docstrings, is_torch_available, requires_pandas
+from ..file_utils import add_end_docstrings, is_torch_available, requires_backends
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, Pipeline, PipelineException
@@ -24,7 +24,7 @@ class TableQuestionAnsweringArgumentHandler(ArgumentHandler):
        #     ...,
        #     {"table": pd.DataFrame, "query" : List[str]}
        # ]
-        requires_pandas(self)
+        requires_backends(self, "pandas")
        import pandas as pd

        if table is None:
...
# This file is autogenerated by the command `make fix-copies`, do not edit.
-from ..file_utils import requires_flax
+from ..file_utils import requires_backends


class FlaxPreTrainedModel:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


FLAX_MODEL_FOR_MASKED_LM_MAPPING = None
@@ -37,153 +37,153 @@ FLAX_MODEL_MAPPING = None

class FlaxAutoModel:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxAutoModelForMaskedLM:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxAutoModelForMultipleChoice:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxAutoModelForNextSentencePrediction:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxAutoModelForPreTraining:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxAutoModelForQuestionAnswering:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxAutoModelForSequenceClassification:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxAutoModelForTokenClassification:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxBertForMaskedLM:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxBertForMultipleChoice:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxBertForNextSentencePrediction:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxBertForPreTraining:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxBertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxBertForSequenceClassification:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxBertForTokenClassification:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxBertModel:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxBertPreTrainedModel:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])


class FlaxRobertaModel:
    def __init__(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_flax(self)
+        requires_backends(self, ["flax"])
This diff is collapsed.
+# This file is autogenerated by the command `make fix-copies`, do not edit.
+from ..file_utils import requires_backends
+
+
+class Speech2TextProcessor:
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["sentencepiece", "speech"])
+# This file is autogenerated by the command `make fix-copies`, do not edit.
+from ..file_utils import requires_backends
+
+
+SLOW_TO_FAST_CONVERTERS = None
+
+
+def convert_slow_tokenizer(*args, **kwargs):
+    requires_backends(convert_slow_tokenizer, ["sentencepiece", "tokenizers"])
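The two new files above follow a deterministic naming scheme: the module name is derived from the sorted backend names joined with "_and_". A sketch of the convention (the sort-and-join mirrors the find_backend helper added to the utils scripts further down):

    backends = sorted(["tokenizers", "sentencepiece"])
    module = "dummy_" + "_and_".join(backends) + "_objects"
    # -> "dummy_sentencepiece_and_tokenizers_objects"

Sorting makes the guard order irrelevant, so checking sentencepiece and tokenizers in either order maps to the same dummy module.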
# This file is autogenerated by the command `make fix-copies`, do not edit.
-from ..file_utils import requires_sentencepiece
+from ..file_utils import requires_backends


class AlbertTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class BarthezTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class BertGenerationTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class CamembertTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class DebertaV2Tokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class M2M100Tokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class MarianTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class MBart50Tokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class MBartTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class MT5Tokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class PegasusTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class ReformerTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class Speech2TextTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class T5Tokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class XLMProphetNetTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class XLMRobertaTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])


class XLNetTokenizer:
    def __init__(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_sentencepiece(self)
+        requires_backends(self, ["sentencepiece"])
# This file is autogenerated by the command `make fix-copies`, do not edit.
-from ..file_utils import requires_speech
+from ..file_utils import requires_backends


class Speech2TextFeatureExtractor:
    def __init__(self, *args, **kwargs):
-        requires_speech(self)
+        requires_backends(self, ["speech"])
-
-
-class Speech2TextProcessor:
-    def __init__(self, *args, **kwargs):
-        requires_speech(self)
This diff is collapsed.
# This file is autogenerated by the command `make fix-copies`, do not edit.
-from ..file_utils import requires_tokenizers
+from ..file_utils import requires_backends


class AlbertTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class BartTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class BarthezTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class BertTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class CamembertTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class ConvBertTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class DistilBertTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class DPRContextEncoderTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class DPRQuestionEncoderTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class DPRReaderTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class ElectraTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class FunnelTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class GPT2TokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class HerbertTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class LayoutLMTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class LEDTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class LongformerTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class LxmertTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class MBart50TokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class MBartTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class MobileBertTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class MPNetTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class MT5TokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class OpenAIGPTTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class PegasusTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class ReformerTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class RetriBertTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class RobertaTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class SqueezeBertTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class T5TokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class XLMRobertaTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class XLNetTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])


class PreTrainedTokenizerFast:
    def __init__(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_tokenizers(self)
+        requires_backends(self, ["tokenizers"])
-
-
-SLOW_TO_FAST_CONVERTERS = None
-
-
-def convert_slow_tokenizer(*args, **kwargs):
-    requires_tokenizers(convert_slow_tokenizer)
# This file is autogenerated by the command `make fix-copies`, do not edit.
-from ..file_utils import requires_vision
+from ..file_utils import requires_backends


class ImageFeatureExtractionMixin:
    def __init__(self, *args, **kwargs):
-        requires_vision(self)
+        requires_backends(self, ["vision"])


class ViTFeatureExtractor:
    def __init__(self, *args, **kwargs):
-        requires_vision(self)
+        requires_backends(self, ["vision"])
...
@@ -22,11 +22,11 @@ import re
# python utils/check_dummies.py
PATH_TO_TRANSFORMERS = "src/transformers"

+# Matches is_xxx_available()
+_re_backend = re.compile(r"is\_([a-z]*)_available()")
+# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
-_re_test_backend = re.compile(r"^\s+if\s+is\_([a-z]*)\_available\(\):\s*$")
-
-BACKENDS = ["torch", "tf", "flax", "sentencepiece", "speech", "tokenizers", "vision"]
+_re_test_backend = re.compile(r"^\s+if\s+is\_[a-z]*\_available\(\)")


DUMMY_CONSTANT = """
@@ -36,25 +36,34 @@ DUMMY_CONSTANT = """

DUMMY_PRETRAINED_CLASS = """
class {0}:
    def __init__(self, *args, **kwargs):
-        requires_{1}(self)
+        requires_backends(self, {1})

    @classmethod
    def from_pretrained(self, *args, **kwargs):
-        requires_{1}(self)
+        requires_backends(self, {1})
"""

DUMMY_CLASS = """
class {0}:
    def __init__(self, *args, **kwargs):
-        requires_{1}(self)
+        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
-    requires_{1}({0})
+    requires_backends({0}, {1})
"""


+def find_backend(line):
+    """Find one (or multiple) backend in a code line of the init."""
+    if _re_test_backend.search(line) is None:
+        return None
+    backends = [b[0] for b in _re_backend.findall(line)]
+    backends.sort()
+    return "_and_".join(backends)
def read_init():
    """ Read the init and extracts PyTorch, TensorFlow, SentencePiece and Tokenizers objects. """
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
@@ -69,14 +78,10 @@ def read_init():
    # Go through the end of the file
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
-        if _re_test_backend.search(lines[line_index]) is not None:
-            backend = _re_test_backend.search(lines[line_index]).groups()[0]
+        backend = find_backend(lines[line_index])
+        if backend is not None:
            line_index += 1

-            # Ignore if backend isn't tracked for dummies.
-            if backend not in BACKENDS:
-                continue
-
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
@@ -128,13 +133,12 @@ def create_dummy_files():
    """ Create the content of the dummy files. """
    backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
-    module_names = {"torch": "pytorch"}
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
-        backend_name = module_names.get(backend, backend)
+        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
-        dummy_file += f"from ..file_utils import requires_{backend_name}\n\n"
+        dummy_file += "from ..file_utils import requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
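For a combined key, the list literal baked into each generated dummy comes from splitting the key back on "_and_" (illustration):

    backend = "sentencepiece_and_speech"
    backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
    # backend_name == '["sentencepiece", "speech"]'
    # so DUMMY_CLASS renders as:  requires_backends(self, ["sentencepiece", "speech"])

A single-backend key like "vision" goes through the same code and yields '["vision"]', which matches the requires_backends(self, ["vision"]) calls in the dummy files above.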
@@ -156,8 +160,11 @@ def check_dummies(overwrite=False):
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
-        with open(file_path, "r", encoding="utf-8", newline="\n") as f:
-            actual_dummies[backend] = f.read()
+        if os.path.isfile(file_path):
+            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
+                actual_dummies[backend] = f.read()
+        else:
+            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
...
@@ -18,12 +18,14 @@ import re

PATH_TO_TRANSFORMERS = "src/transformers"

-BACKENDS = ["torch", "tf", "flax", "sentencepiece", "speech", "tokenizers", "vision"]
-
+# Matches is_xxx_available()
+_re_backend = re.compile(r"is\_([a-z]*)_available()")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if is_foo_available
-_re_test_backend = re.compile(r"^\s*if\s+is\_([a-z]*)\_available\(\):\s*$")
+_re_test_backend = re.compile(r"^\s*if\s+is\_[a-z]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
@@ -36,6 +38,15 @@ _re_between_brackets = re.compile("^\s+\[([^\]]+)\]")
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


+def find_backend(line):
+    """Find one (or multiple) backend in a code line of the init."""
+    if _re_test_backend.search(line) is None:
+        return None
+    backends = [b[0] for b in _re_backend.findall(line)]
+    backends.sort()
+    return "_and_".join(backends)
+
+
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
@@ -54,7 +65,7 @@ def parse_init(init_file):

    # First grab the objects without a specific backend in _import_structure
    objects = []
-    while not lines[line_index].startswith("if TYPE_CHECKING") and _re_test_backend.search(lines[line_index]) is None:
+    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
@@ -68,14 +79,10 @@ def parse_init(init_file):

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if is_backend_available, we grab all objects associated.
-        if _re_test_backend.search(lines[line_index]) is not None:
-            backend = _re_test_backend.search(lines[line_index]).groups()[0]
+        backend = find_backend(lines[line_index])
+        if backend is not None:
            line_index += 1

-            # Ignore if backend isn't tracked for dummies.
-            if backend not in BACKENDS:
-                continue
-
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
@@ -106,7 +113,7 @@ def parse_init(init_file):

    objects = []
    while (
        line_index < len(lines)
-        and _re_test_backend.search(lines[line_index]) is None
+        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
@@ -121,14 +128,10 @@ def parse_init(init_file):

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backemd_available, we grab all objects associated.
-        if _re_test_backend.search(lines[line_index]) is not None:
-            backend = _re_test_backend.search(lines[line_index]).groups()[0]
+        backend = find_backend(lines[line_index])
+        if backend is not None:
            line_index += 1

-            # Ignore if backend isn't tracked for dummies.
-            if backend not in BACKENDS:
-                continue
-
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):