renzhc / diffusers_dcu / Commits

Commit c0f5346a (Unverified)
Authored Feb 16, 2024 by co63oc; committed by GitHub on Feb 15, 2024
Fix procecss process (#6591)

* Fix words
* Fix

---------

Co-authored-by: YiYi Xu <yixu310@gmail.com>

Parent: 087daee2
Changes: 61
Showing 20 changed files with 33 additions and 33 deletions (+33 / -33)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py  +2 -2
src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py  +2 -2
src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py  +2 -2
src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py  +2 -2
src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py  +2 -2
src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py  +2 -2
src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py  +1 -1
src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py  +2 -2
src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py  +2 -2
src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py  +2 -2
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py  +1 -1
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py  +1 -1
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py  +1 -1
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py  +1 -1
src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py  +2 -2
src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py  +1 -1
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py  +2 -2
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py  +2 -2
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py  +2 -2
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py  +1 -1
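Every file on this page receives the same one-character fix: the inline comment "# textual inversion: procecss multi-vector tokens if necessary" becomes "# textual inversion: process multi-vector tokens if necessary" in each pipeline's prompt-encoding path. For context, here is a minimal, self-contained sketch of the pattern that comment annotates. It is illustrative only, not diffusers source; the stub mixin and its placeholder-expansion table are hypothetical stand-ins for TextualInversionLoaderMixin.maybe_convert_prompt.

# Illustrative sketch only (not diffusers source). When a pipeline mixes in the
# textual-inversion loader, prompts pass through maybe_convert_prompt so a
# multi-vector learned embedding placeholder is expanded to one token per vector.

class TextualInversionMixinStub:
    # Hypothetical stand-in for diffusers' TextualInversionLoaderMixin.
    _multi_vector = {"<concept>": "<concept> <concept>_1 <concept>_2"}

    def maybe_convert_prompt(self, prompt, tokenizer):
        # Expand any placeholder that was stored as several learned vectors.
        return " ".join(self._multi_vector.get(tok, tok) for tok in prompt.split())


class DemoPipeline(TextualInversionMixinStub):
    tokenizer = None  # a real pipeline would hold a CLIPTokenizer here

    def encode_prompt(self, prompt, prompt_embeds=None):
        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionMixinStub):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
        return prompt


print(DemoPipeline().encode_prompt("a photo of <concept> at sunset"))
# -> a photo of <concept> <concept>_1 <concept>_2 at sunset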
src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py

@@ -321,7 +321,7 @@ class StableUnCLIPImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -403,7 +403,7 @@ class StableUnCLIPImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py

@@ -356,7 +356,7 @@ class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline, TextualInversion
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -438,7 +438,7 @@ class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline, TextualInversion
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py

@@ -498,7 +498,7 @@ class StableDiffusionDiffEditPipeline(DiffusionPipeline, TextualInversionLoaderM
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -580,7 +580,7 @@ class StableDiffusionDiffEditPipeline(DiffusionPipeline, TextualInversionLoaderM
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py

@@ -295,7 +295,7 @@ class StableDiffusionGLIGENPipeline(DiffusionPipeline):
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -377,7 +377,7 @@ class StableDiffusionGLIGENPipeline(DiffusionPipeline):
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py

@@ -320,7 +320,7 @@ class StableDiffusionGLIGENTextImagePipeline(DiffusionPipeline):
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -402,7 +402,7 @@ class StableDiffusionGLIGENTextImagePipeline(DiffusionPipeline):
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py

@@ -238,7 +238,7 @@ class StableDiffusionKDiffusionPipeline(DiffusionPipeline, TextualInversionLoade
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -320,7 +320,7 @@ class StableDiffusionKDiffusionPipeline(DiffusionPipeline, TextualInversionLoade
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py

@@ -325,7 +325,7 @@ class StableDiffusionXLKDiffusionPipeline(
             prompt_2 = prompt_2 or prompt
             prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
             for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
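The SDXL-family pipelines (this file and several below) carry the same comment in a slightly different spot: the prompt and prompt_2 inputs are paired with the two tokenizer/text-encoder stacks before encoding. A rough sketch of that loop follows; it is illustrative only, not diffusers source, and plain strings stand in for the real CLIP tokenizer and text-encoder objects.

# Illustrative sketch only (not diffusers source); the tokenizer/text-encoder
# entries are plain-string stand-ins for the real CLIP objects.
prompt = ["a castle on a hill"]
prompt_2 = None

prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2

tokenizers = ["tokenizer", "tokenizer_2"]
text_encoders = ["text_encoder", "text_encoder_2"]

# textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = []
prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
    # a real pipeline would tokenize `prompt` with `tokenizer`, run `text_encoder`,
    # and append the resulting hidden states; here we just record the pairing
    prompt_embeds_list.append((prompt, tokenizer, text_encoder))

print(prompt_embeds_list)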
src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py

@@ -292,7 +292,7 @@ class StableDiffusionLDM3DPipeline(
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -374,7 +374,7 @@ class StableDiffusionLDM3DPipeline(
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py

@@ -250,7 +250,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -332,7 +332,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py

@@ -271,7 +271,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin,
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -353,7 +353,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin,
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py

@@ -385,7 +385,7 @@ class StableDiffusionXLPipeline(
             prompt_2 = prompt_2 or prompt
             prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
             for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py

@@ -407,7 +407,7 @@ class StableDiffusionXLImg2ImgPipeline(
             prompt_2 = prompt_2 or prompt
             prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
             for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py

@@ -618,7 +618,7 @@ class StableDiffusionXLInpaintPipeline(
             prompt_2 = prompt_2 or prompt
             prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
             for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py

@@ -326,7 +326,7 @@ class StableDiffusionXLInstructPix2PixPipeline(
         if prompt_embeds is None:
             prompt_2 = prompt_2 or prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
             for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py

@@ -358,7 +358,7 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline):
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -440,7 +440,7 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline):
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py

@@ -399,7 +399,7 @@ class StableDiffusionXLAdapterPipeline(
             prompt_2 = prompt_2 or prompt
             prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
             for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py

@@ -256,7 +256,7 @@ class TextToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lora
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -338,7 +338,7 @@ class TextToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lora
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py

@@ -332,7 +332,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -414,7 +414,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py

@@ -838,7 +838,7 @@ class TextToVideoZeroPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
             batch_size = prompt_embeds.shape[0]
         if prompt_embeds is None:
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
@@ -920,7 +920,7 @@ class TextToVideoZeroPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
             else:
                 uncond_tokens = negative_prompt
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             if isinstance(self, TextualInversionLoaderMixin):
                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py

@@ -685,7 +685,7 @@ class TextToVideoZeroSDXLPipeline(
             prompt_2 = prompt_2 or prompt
             prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
-            # textual inversion: procecss multi-vector tokens if necessary
+            # textual inversion: process multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
             for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):