renzhc / diffusers_dcu / Commits

Commit 73bf620d (unverified)
Authored Sep 12, 2023 by Kashif Rasul, committed by GitHub on Sep 12, 2023
fix E721 Do not compare types, use `isinstance()` (#4992)
parent c806f2fa
Showing 11 changed files with 13 additions and 13 deletions (+13, -13):
examples/community/lpw_stable_diffusion_xl.py (+1, -1)
examples/community/stable_diffusion_xl_reference.py (+1, -1)
src/diffusers/experimental/rl/value_guided_sampling.py (+1, -1)
src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py (+1, -1)
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py (+1, -1)
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py (+1, -1)
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py (+1, -1)
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py (+1, -1)
src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py (+1, -1)
tests/pipelines/consistency_models/test_consistency_models.py (+1, -1)
tests/pipelines/unidiffuser/test_unidiffuser.py (+3, -3)
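Every hunk below applies the same one-line fix: a comparison of the form type(x) == T (or type(x) is T), which flake8 flags as E721, is rewritten as isinstance(x, T). The practical difference is that isinstance() also accepts subclasses of T, while an exact type comparison does not. A minimal standalone sketch of the distinction (the Budget class is illustrative, not part of the diff):

# Minimal sketch of why isinstance() is preferred over exact type comparison (E721).
class Budget(float):  # hypothetical float subclass, used only for illustration
    pass

value = Budget(0.8)
print(type(value) == float)      # False: exact-type comparison rejects the subclass
print(isinstance(value, float))  # True: isinstance accepts float and its subclasses

# The same holds for built-in subclass relationships, e.g. bool is a subclass of int:
print(type(True) == int)         # False
print(isinstance(True, int))     # True

# isinstance() simply returns False for None, so guards such as
# `denoising_end is not None and isinstance(denoising_end, float)` read naturally.
print(isinstance(None, float))   # False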
examples/community/lpw_stable_diffusion_xl.py

@@ -1138,7 +1138,7 @@ class SDXLLongPromptWeightingPipeline(DiffusionPipeline, FromSingleFileMixin, Lo
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

         # 7.1 Apply denoising_end
-        if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
+        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
examples/community/stable_diffusion_xl_reference.py

@@ -701,7 +701,7 @@ class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline):
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

         # 10.1 Apply denoising_end
-        if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
+        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
src/diffusers/experimental/rl/value_guided_sampling.py

@@ -76,7 +76,7 @@ class ValueGuidedRLPipeline(DiffusionPipeline):
         return x_in * self.stds[key] + self.means[key]

     def to_torch(self, x_in):
-        if type(x_in) is dict:
+        if isinstance(x_in, dict):
             return {k: self.to_torch(v) for k, v in x_in.items()}
         elif torch.is_tensor(x_in):
             return x_in.to(self.unet.device)
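A behavioral note on the value_guided_sampling change: type(x_in) is dict matches only the exact dict class, while isinstance(x_in, dict) also matches dict subclasses such as collections.OrderedDict, so those now take the recursive branch as well. A small sketch of that difference (not taken from the pipeline code):

from collections import OrderedDict

x = OrderedDict(a=1)
print(type(x) is dict)      # False: exact-type check excludes dict subclasses
print(isinstance(x, dict))  # True: any dict subclass is accepted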
src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py

@@ -178,7 +178,7 @@ class AudioDiffusionPipeline(DiffusionPipeline):
         self.scheduler.set_timesteps(steps)
         step_generator = step_generator or generator
         # For backwards compatibility
-        if type(self.unet.config.sample_size) == int:
+        if isinstance(self.unet.config.sample_size, int):
             self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
         if noise is None:
             noise = randn_tensor(
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py

@@ -810,7 +810,7 @@ class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoad
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

         # 7.1 Apply denoising_end
-        if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
+        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py

@@ -885,7 +885,7 @@ class StableDiffusionXLImg2ImgPipeline(
         # 5. Prepare timesteps
         def denoising_value_valid(dnv):
-            return type(denoising_end) == float and 0 < dnv < 1
+            return isinstance(denoising_end, float) and 0 < dnv < 1

         self.scheduler.set_timesteps(num_inference_steps, device=device)
         timesteps, num_inference_steps = self.get_timesteps(
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py

@@ -1120,7 +1120,7 @@ class StableDiffusionXLInpaintPipeline(
         # 4. set timesteps
         def denoising_value_valid(dnv):
-            return type(denoising_end) == float and 0 < dnv < 1
+            return isinstance(denoising_end, float) and 0 < dnv < 1

         self.scheduler.set_timesteps(num_inference_steps, device=device)
         timesteps, num_inference_steps = self.get_timesteps(
src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py

@@ -837,7 +837,7 @@ class StableDiffusionXLInstructPix2PixPipeline(
         # 11. Denoising loop
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
-        if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
+        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py

@@ -886,7 +886,7 @@ class StableDiffusionXLAdapterPipeline(
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

         # 7.1 Apply denoising_end
-        if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
+        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
             discrete_timestep_cutoff = int(
                 round(
                     self.scheduler.config.num_train_timesteps
tests/pipelines/consistency_models/test_consistency_models.py

@@ -193,7 +193,7 @@ class ConsistencyModelPipelineSlowTests(unittest.TestCase):
         return inputs

     def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
-        if type(device) == str:
+        if isinstance(device, str):
             device = torch.device(device)
         generator = torch.Generator(device=device).manual_seed(seed)
         latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
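The test-suite hunks above and below all normalize a device argument that may arrive either as a string or as a torch.device. A small usage sketch of that pattern as a standalone helper (the function name is illustrative, not from the tests):

import torch

def normalize_device(device):
    # Accept "cpu" / "cuda:0" strings as well as torch.device objects.
    if isinstance(device, str):
        device = torch.device(device)
    return device

print(normalize_device("cpu"))                # device(type='cpu')
print(normalize_device(torch.device("cpu")))  # device(type='cpu')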
tests/pipelines/unidiffuser/test_unidiffuser.py

@@ -109,7 +109,7 @@ class UniDiffuserPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         return inputs

     def get_fixed_latents(self, device, seed=0):
-        if type(device) == str:
+        if isinstance(device, str):
             device = torch.device(device)
         generator = torch.Generator(device=device).manual_seed(seed)
         # Hardcode the shapes for now.

@@ -545,7 +545,7 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):
         return inputs

     def get_fixed_latents(self, device, seed=0):
-        if type(device) == str:
+        if isinstance(device, str):
             device = torch.device(device)
         latent_device = torch.device("cpu")
         generator = torch.Generator(device=latent_device).manual_seed(seed)

@@ -648,7 +648,7 @@ class UniDiffuserPipelineNightlyTests(unittest.TestCase):
         return inputs

     def get_fixed_latents(self, device, seed=0):
-        if type(device) == str:
+        if isinstance(device, str):
             device = torch.device(device)
         latent_device = torch.device("cpu")
         generator = torch.Generator(device=latent_device).manual_seed(seed)