renzhc / diffusers_dcu / Commits

Commit a5bdb678
Authored Mar 31, 2023 by Patrick von Platen
Parent c4335626

fix importing diffusers without transformers installed

Showing 5 changed files with 33 additions and 21 deletions:

    src/diffusers/__init__.py                                                  +2   -2
    src/diffusers/loaders.py                                                   +2   -2
    src/diffusers/pipelines/__init__.py                                        +3   -4
    src/diffusers/pipelines/spectrogram_diffusion/__init__.py                 +22   -9
    src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py   +4   -4
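The commit message states the problem directly: with torch and note_seq installed but transformers missing, importing diffusers could fail, because the spectrogram-diffusion exports were guarded only by torch/note_seq checks. The diffs below add transformers to every guard and route the fallback through a renamed dummy-objects module. A rough way to sanity-check the fix in an environment without transformers (a hypothetical session: "some/checkpoint" is a placeholder, and the exact error type and text come from diffusers' requires_backends helper, so they are hedged here by catching Exception):

# Run in an environment WITHOUT transformers installed.
import diffusers

print(diffusers.__version__)  # the top-level import now succeeds

try:
    # The pipeline name still exists, but it is a dummy object that
    # complains about missing backends only when actually used.
    diffusers.SpectrogramDiffusionPipeline.from_pretrained("some/checkpoint")  # placeholder path
except Exception as err:
    print(type(err).__name__, err)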
src/diffusers/__init__.py

@@ -178,10 +178,10 @@ else:
     from .pipelines import AudioDiffusionPipeline, Mel

 try:
-    if not (is_torch_available() and is_note_seq_available()):
+    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
         raise OptionalDependencyNotAvailable()
 except OptionalDependencyNotAvailable:
-    from .utils.dummy_torch_and_note_seq_objects import *  # noqa F403
+    from .utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
 else:
     from .pipelines import SpectrogramDiffusionPipeline
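For readers unfamiliar with the guard above, here is a stripped-down, self-contained version of the same pattern. The helper names are the real ones from diffusers.utils (the diff itself imports them), but the fallback class is a hand-written placeholder rather than the generated dummy object diffusers ships:

from diffusers.utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing backend: expose a stand-in that fails loudly only when used,
    # so `import diffusers` itself never breaks.
    class SpectrogramDiffusionPipeline:
        def __init__(self, *args, **kwargs):
            raise ImportError(
                "SpectrogramDiffusionPipeline requires the transformers, torch and note_seq backends."
            )
else:
    from diffusers.pipelines import SpectrogramDiffusionPipeline  # the real pipeline

Because OptionalDependencyNotAvailable is raised and caught inside the try/except, a missing dependency never surfaces as an error at import time; it only changes which object ends up bound to the name.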
src/diffusers/loaders.py

@@ -306,7 +306,7 @@ class TextualInversionLoaderMixin:
     Mixin class for loading textual inversion tokens and embeddings to the tokenizer and text encoder.
     """

-    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: PreTrainedTokenizer):
+    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):
         r"""
         Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
         to a multi-vector textual inversion embedding, this function will process the prompt so that the special token

@@ -334,7 +334,7 @@ class TextualInversionLoaderMixin:
         return prompts

-    def _maybe_convert_prompt(self, prompt: str, tokenizer: PreTrainedTokenizer):
+    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):
         r"""
         Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
         to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
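The loaders.py change only quotes the annotation: `tokenizer: "PreTrainedTokenizer"` is a forward reference, so the name is stored as a string and transformers no longer has to be importable just to define the method. A common companion pattern, shown here as a general sketch (not necessarily how loaders.py itself guards the import), keeps the annotation visible to type checkers while skipping the runtime import:

from typing import TYPE_CHECKING, List, Union

if TYPE_CHECKING:
    # Evaluated only by static type checkers, never at runtime.
    from transformers import PreTrainedTokenizer


def maybe_convert_prompt(prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):
    # The quoted annotation is resolved lazily, so this module imports
    # cleanly even when transformers is not installed.
    ...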
src/diffusers/pipelines/__init__.py

@@ -26,7 +26,6 @@ else:
     from .pndm import PNDMPipeline
     from .repaint import RePaintPipeline
     from .score_sde_ve import ScoreSdeVePipeline
-    from .spectrogram_diffusion import SpectrogramDiffusionPipeline
     from .stochastic_karras_ve import KarrasVePipeline

 try:

@@ -132,9 +131,9 @@ else:
         FlaxStableDiffusionPipeline,
     )

 try:
-    if not (is_note_seq_available()):
+    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
         raise OptionalDependencyNotAvailable()
 except OptionalDependencyNotAvailable:
-    from ..utils.dummy_note_seq_objects import *  # noqa F403
+    from ..utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
 else:
-    from .spectrogram_diffusion import MidiProcessor
+    from .spectrogram_diffusion import MidiProcessor, SpectrogramDiffusionPipeline
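All of these branches hinge on the `is_*_available()` helpers from diffusers.utils. A rough sketch of the core idea behind such a check (the real helpers in diffusers typically also record versions and cache the result; the function below is illustrative, not the library's implementation):

import importlib.util


def is_transformers_available() -> bool:
    # True if the package can be located on sys.path; nothing is imported,
    # so the check is cheap and has no side effects.
    return importlib.util.find_spec("transformers") is not None


print(is_transformers_available())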
src/diffusers/pipelines/spectrogram_diffusion/__init__.py

 # flake8: noqa
-from ...utils import is_note_seq_available
+from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
+from ...utils import OptionalDependencyNotAvailable
-from .notes_encoder import SpectrogramNotesEncoder
-from .continous_encoder import SpectrogramContEncoder
-from .pipeline_spectrogram_diffusion import (
-    SpectrogramContEncoder,
-    SpectrogramDiffusionPipeline,
-    T5FilmDecoder,
-)

-if is_note_seq_available():
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
+else:
+    from .notes_encoder import SpectrogramNotesEncoder
+    from .continous_encoder import SpectrogramContEncoder
+    from .pipeline_spectrogram_diffusion import (
+        SpectrogramContEncoder,
+        SpectrogramDiffusionPipeline,
+        T5FilmDecoder,
+    )
+
+try:
+    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
+else:
     from .midi_utils import MidiProcessor
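After this rewrite, MidiProcessor is only exported when transformers, torch and note_seq are all present, so downstream code that treats MIDI support as optional can mirror the same checks. A hypothetical usage sketch (not part of the commit; the import path is the one used at this commit):

from diffusers.utils import is_note_seq_available, is_torch_available, is_transformers_available

if is_transformers_available() and is_torch_available() and is_note_seq_available():
    # All backends present: the real MidiProcessor is importable.
    from diffusers.pipelines.spectrogram_diffusion import MidiProcessor
    print("MIDI support available:", MidiProcessor.__name__)
else:
    print("Install transformers, torch and note_seq to enable MIDI-to-spectrogram features.")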
src/diffusers/utils/dummy_torch_and_note_seq_objects.py → src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py

@@ -3,15 +3,15 @@ from ..utils import DummyObject, requires_backends
 class SpectrogramDiffusionPipeline(metaclass=DummyObject):
-    _backends = ["torch", "note_seq"]
+    _backends = ["transformers", "torch", "note_seq"]

     def __init__(self, *args, **kwargs):
-        requires_backends(self, ["torch", "note_seq"])
+        requires_backends(self, ["transformers", "torch", "note_seq"])

     @classmethod
     def from_config(cls, *args, **kwargs):
-        requires_backends(cls, ["torch", "note_seq"])
+        requires_backends(cls, ["transformers", "torch", "note_seq"])

     @classmethod
     def from_pretrained(cls, *args, **kwargs):
-        requires_backends(cls, ["torch", "note_seq"])
+        requires_backends(cls, ["transformers", "torch", "note_seq"])
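The renamed module above is generated boilerplate: `DummyObject` and `requires_backends` come from diffusers' utils, and each dummy class just records which backends it needs and raises when touched. An illustrative re-implementation of the pattern (not the actual diffusers source; the error type and message format are simplified):

class DummyObject(type):
    # Metaclass: accessing any attribute the dummy class does not define
    # reports the missing backends instead of returning something unusable.
    def __getattr__(cls, name):
        requires_backends(cls, cls._backends)


def requires_backends(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])


# Importing and referencing the dummy is harmless; using it is not:
try:
    SpectrogramDiffusionPipeline.from_pretrained("some/checkpoint")  # placeholder path
except ImportError as err:
    print(err)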