renzhc / diffusers_dcu · Commits · bdd16116

Unverified commit bdd16116, authored Oct 02, 2023 by Patrick von Platen; committed by GitHub on Oct 02, 2023.
[Schedulers] Fix callback steps (#5261)
* fix all
* make fix copies
* make fix copies
Parent: c8b0f0eb
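Every hunk on this page makes the same change: instead of handing the raw timestep-loop index `i` to the user-supplied callback, the pipelines now hand it `i // getattr(self.scheduler, "order", 1)`. A plausible reading of the fix (this rationale is mine, not spelled out in the commit message): higher-order schedulers expand the timestep list so the denoising loop runs `order` iterations per requested inference step, and integer division by the scheduler order turns the loop index back into a denoising-step index. A minimal standalone sketch of that index mapping, assuming an order of 2:

scheduler_order = 2                   # e.g. a second-order (Heun-style) scheduler
for i in range(6):                    # i iterates over the expanded timestep list
    step_idx = i // scheduler_order   # the value the callback now receives
    print(i, step_idx)                # prints (0,0) (1,0) (2,1) (3,1) (4,2) (5,2)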
Changes: 87
Showing 20 changed files with 42 additions and 21 deletions (+42 -21)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py (+2 -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py (+2 -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py (+2 -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py (+2 -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_model_editing.py (+2 -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py (+2 -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py (+4 -2)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_sag.py (+2 -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py (+2 -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py (+2 -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py (+2 -1)
src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py (+2 -1)
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py (+2 -1)
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py (+2 -1)
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py (+2 -1)
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py (+2 -1)
src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py (+2 -1)
src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py (+2 -1)
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py (+2 -1)
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py (+2 -1)
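For context, a hypothetical caller-side view (not part of this commit; the pipeline class and model id are illustrative only): a callback passed through the public callback / callback_steps arguments now receives an index counted in denoising steps regardless of the scheduler's order.

from diffusers import StableDiffusionPipeline

def on_step(step_idx, timestep, latents):
    # After this fix, step_idx counts completed denoising steps even when the
    # scheduler's internal timestep loop is order-times longer.
    print(f"step {step_idx}: timestep {timestep}, latents shape {tuple(latents.shape)}")

# Model id is an illustrative example, not taken from this commit.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
image = pipe("an astronaut riding a horse", callback=on_step, callback_steps=1).images[0]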
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
@@ -744,7 +744,8 @@ class StableDiffusionInpaintPipelineLegacy(
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         # use original latents corresponding to unmasked portions of the image
         latents = (init_latents_orig * mask) + (latents * (1 - mask))
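One detail worth noting (my reading, not stated in the commit): the added line uses getattr with a default of 1 rather than self.scheduler.order directly, so a scheduler that does not expose an order attribute keeps working and the callback index simply falls back to the raw loop index. A standalone illustration of that fallback:

class MinimalScheduler:
    # Hypothetical scheduler without an `order` attribute, for illustration only.
    pass

i = 7
order = getattr(MinimalScheduler(), "order", 1)   # -> 1
step_idx = i // order                             # -> 7, unchanged
print(order, step_idx)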
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
@@ -378,7 +378,8 @@ class StableDiffusionInstructPix2PixPipeline(DiffusionPipeline, TextualInversion
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py
@@ -472,7 +472,8 @@ class StableDiffusionLatentUpscalePipeline(DiffusionPipeline):
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py
@@ -674,7 +674,8 @@ class StableDiffusionLDM3DPipeline(
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_model_editing.py
@@ -802,7 +802,8 @@ class StableDiffusionModelEditingPipeline(DiffusionPipeline, TextualInversionLoa
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py
@@ -770,7 +770,8 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             if circular_padding:
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py
@@ -1006,7 +1006,8 @@ class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline):
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         # 8. Compute the edit directions.
         edit_direction = self.construct_direction(source_embeds, target_embeds).to(prompt_embeds.device)
@@ -1283,7 +1284,8 @@ class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline):
                 ):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         inverted_latents = latents.detach().clone()
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_sag.py
@@ -712,7 +712,8 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py
@@ -756,7 +756,8 @@ class StableDiffusionUpscalePipeline(DiffusionPipeline, TextualInversionLoaderMi
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             # make sure the VAE is in float32 mode, as it overflows in float16
src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py
@@ -925,7 +925,8 @@ class StableUnCLIPPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraL
             latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

             if callback is not None and i % callback_steps == 0:
-                callback(i, t, latents)
+                step_idx = i // getattr(self.scheduler, "order", 1)
+                callback(step_idx, t, latents)

         if not output_type == "latent":
             image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py
@@ -821,7 +821,8 @@ class StableUnCLIPImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin
             latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

             if callback is not None and i % callback_steps == 0:
-                callback(i, t, latents)
+                step_idx = i // getattr(self.scheduler, "order", 1)
+                callback(step_idx, t, latents)

         # 9. Post-processing
         if not output_type == "latent":
src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py
@@ -674,7 +674,8 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         # 8. Post-processing
         image = self.decode_latents(latents)
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py
@@ -877,7 +877,8 @@ class StableDiffusionXLPipeline(
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             # make sure the VAE is in float32 mode, as it overflows in float16
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py
@@ -1028,7 +1028,8 @@ class StableDiffusionXLImg2ImgPipeline(
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             # make sure the VAE is in float32 mode, as it overflows in float16
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py
@@ -1352,7 +1352,8 @@ class StableDiffusionXLInpaintPipeline(
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             # make sure the VAE is in float32 mode, as it overflows in float16
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py
@@ -923,7 +923,8 @@ class StableDiffusionXLInstructPix2PixPipeline(
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             # make sure the VAE is in float32 mode, as it overflows in float16
src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py
@@ -799,7 +799,8 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline):
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if output_type == "latent":
             image = latents
src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py
@@ -970,7 +970,8 @@ class StableDiffusionXLAdapterPipeline(
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if not output_type == "latent":
             # make sure the VAE is in float32 mode, as it overflows in float16
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py
@@ -664,7 +664,8 @@ class TextToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lora
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if output_type == "latent":
             return TextToVideoSDPipelineOutput(frames=latents)
src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py
@@ -736,7 +736,8 @@ class VideoToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                     progress_bar.update()
                     if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)

         if output_type == "latent":
             return TextToVideoSDPipelineOutput(frames=latents)