"docs/source/vscode:/vscode.git/clone" did not exist on "311bd88a040d42619f5eccd5a3f53852ec2be3e1"
Unverified commit 8dc93ad3, authored by Patrick von Platen, committed by GitHub

[Import] Don't force transformers to be installed (#5035)

* [Import] Don't force transformers to be installed

* make style
parent e2033d2d
@@ -42,7 +42,7 @@ from .utils.import_utils import BACKENDS_MAPPING

 if is_transformers_available():
-    from transformers import CLIPTextModel, CLIPTextModelWithProjection, PreTrainedModel, PreTrainedTokenizer
+    from transformers import CLIPTextModel, CLIPTextModelWithProjection

 if is_accelerate_available():
     from accelerate import init_empty_weights

@@ -628,7 +628,7 @@ class TextualInversionLoaderMixin:
     Load textual inversion tokens and embeddings to the tokenizer and text encoder.
     """

-    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):
+    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):  # noqa: F821
         r"""
         Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
         be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual

@@ -655,7 +655,7 @@ class TextualInversionLoaderMixin:
         return prompts

-    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):
+    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):  # noqa: F821
         r"""
         Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
         to a multi-vector textual inversion embedding, this function will process the prompt so that the special token

@@ -689,8 +689,8 @@ class TextualInversionLoaderMixin:
         self,
         pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
         token: Optional[Union[str, List[str]]] = None,
-        tokenizer: Optional[PreTrainedTokenizer] = None,
-        text_encoder: Optional[PreTrainedModel] = None,
+        tokenizer: Optional["PreTrainedTokenizer"] = None,  # noqa: F821
+        text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
         **kwargs,
     ):
         r"""
...
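For context, here is a minimal, self-contained sketch of the pattern this commit applies: import `transformers` names only when the package is available, and refer to annotation-only types such as `PreTrainedTokenizer` and `PreTrainedModel` as string forward references marked `# noqa: F821`, so the module imports cleanly without `transformers`. The module layout, the stub class, and the local `is_transformers_available` helper below are illustrative assumptions, not the actual diffusers source.

```python
# Illustrative sketch only -- module layout and class name are hypothetical,
# not the actual diffusers source.
import importlib.util
from typing import List, Optional, Union


def is_transformers_available() -> bool:
    """Return True if `transformers` can be imported in this environment."""
    return importlib.util.find_spec("transformers") is not None


if is_transformers_available():
    # Import only the names that are actually used at runtime.
    from transformers import CLIPTextModel, CLIPTextModelWithProjection  # noqa: F401


class TextualInversionSketch:
    # Types that appear only in annotations stay as string forward references,
    # so importing this module never requires `transformers`. The `# noqa: F821`
    # comments silence flake8's undefined-name check for those strings.
    def maybe_convert_prompt(
        self,
        prompt: Union[str, List[str]],
        tokenizer: "PreTrainedTokenizer",  # noqa: F821
    ):
        raise NotImplementedError

    def load_textual_inversion(
        self,
        pretrained_model_name_or_path: str,
        token: Optional[Union[str, List[str]]] = None,
        tokenizer: Optional["PreTrainedTokenizer"] = None,  # noqa: F821
        text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
        **kwargs,
    ):
        raise NotImplementedError
```

Importing such a module succeeds whether or not `transformers` is installed; only code paths that actually instantiate the CLIP models need the package.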