Commit 3a0d3da6 in renzhc/diffusers_dcu (unverified)
Authored Feb 06, 2023 by nickkolok; committed by GitHub on Feb 06, 2023

Fix a typo: bfloa16 -> bfloat16 (#2243)

Parent: 22c1ba56
Changes: 28
Showing 8 changed files with 8 additions and 8 deletions.
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py (+1 / -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py (+1 / -1)
src/diffusers/pipelines/stable_diffusion/safety_checker.py (+1 / -1)
src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py (+1 / -1)
src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py (+1 / -1)
src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py (+1 / -1)
src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py (+1 / -1)
src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py (+1 / -1)
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py

@@ -316,7 +316,7 @@ class StableDiffusionKDiffusionPipeline(DiffusionPipeline):
         latents = 1 / self.vae.config.scaling_factor * latents
         image = self.vae.decode(latents).sample
         image = (image / 2 + 0.5).clamp(0, 1)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
         return image
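The comment being corrected here describes a real constraint: NumPy has no bfloat16 dtype, so calling .numpy() on a bfloat16 tensor raises a TypeError, which is why these pipelines cast to float32 before the conversion. A minimal standalone sketch of that behavior (not part of the commit; the tensor shape is illustrative):

import torch

# Illustrative bfloat16 "decoded image" tensor in NCHW layout.
image = torch.rand(1, 3, 64, 64, dtype=torch.bfloat16)

try:
    # NumPy has no bfloat16 dtype, so converting directly raises a TypeError.
    image.cpu().permute(0, 2, 3, 1).numpy()
except TypeError as err:
    print("direct conversion failed:", err)

# Casting to float32 first, as the pipelines do, makes the conversion work
# at negligible cost relative to the VAE decode itself.
arr = image.cpu().permute(0, 2, 3, 1).float().numpy()
print(arr.dtype, arr.shape)  # float32 (1, 64, 64, 3)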
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py

@@ -313,7 +313,7 @@ class StableDiffusionUpscalePipeline(DiffusionPipeline):
         latents = 1 / self.vae.config.scaling_factor * latents
         image = self.vae.decode(latents).sample
         image = (image / 2 + 0.5).clamp(0, 1)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
         return image
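Every pipeline hunk in this commit touches the same decode path. A hedged reconstruction of that path as a standalone helper, using only the lines visible in the hunks (the function name and the bare vae parameter are illustrative, not taken from the commit):

import torch

def decode_latents(vae, latents: torch.Tensor):
    # Sketch of the shared decode path patched across these pipelines:
    # undo the encoder-side scaling, decode with the VAE, map [-1, 1] to [0, 1],
    # and return a float32 NumPy array in NHWC layout.
    latents = 1 / vae.config.scaling_factor * latents
    image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    # cast to float32: cheap, and compatible with bfloat16 (see the corrected comment)
    return image.cpu().permute(0, 2, 3, 1).float().numpy()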
src/diffusers/pipelines/stable_diffusion/safety_checker.py

@@ -52,7 +52,7 @@ class StableDiffusionSafetyChecker(PreTrainedModel):
         pooled_output = self.vision_model(clip_input)[1]  # pooled_output
         image_embeds = self.visual_projection(pooled_output)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
         special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
         cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
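In the safety checkers, cosine_distance is a helper defined elsewhere in the same file; its definition is not part of this diff. A sketch of a pairwise cosine-similarity helper consistent with how it is called here (the implementation details are an assumption, not taken from the commit):

import torch
import torch.nn.functional as F

def cosine_distance(image_embeds: torch.Tensor, concept_embeds: torch.Tensor) -> torch.Tensor:
    # Assumed behavior: normalize each row to unit length, then matrix-multiply to get a
    # pairwise similarity matrix (one row per image, one column per concept embedding).
    return F.normalize(image_embeds) @ F.normalize(concept_embeds).t()

# Illustrative shapes: 4 image embeddings scored against 3 concept embeddings.
scores = cosine_distance(torch.randn(4, 768), torch.randn(3, 768)).cpu().float().numpy()
print(scores.shape, scores.dtype)  # (4, 3) float32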
src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py

@@ -367,7 +367,7 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         latents = 1 / self.vae.config.scaling_factor * latents
         image = self.vae.decode(latents).sample
         image = (image / 2 + 0.5).clamp(0, 1)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
         return image
src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py

@@ -51,7 +51,7 @@ class SafeStableDiffusionSafetyChecker(PreTrainedModel):
         pooled_output = self.vision_model(clip_input)[1]  # pooled_output
         image_embeds = self.visual_projection(pooled_output)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
         special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
         cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py

@@ -333,7 +333,7 @@ class VersatileDiffusionDualGuidedPipeline(DiffusionPipeline):
         latents = 1 / self.vae.config.scaling_factor * latents
         image = self.vae.decode(latents).sample
         image = (image / 2 + 0.5).clamp(0, 1)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
         return image
src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py

@@ -193,7 +193,7 @@ class VersatileDiffusionImageVariationPipeline(DiffusionPipeline):
         latents = 1 / self.vae.config.scaling_factor * latents
         image = self.vae.decode(latents).sample
         image = (image / 2 + 0.5).clamp(0, 1)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
         return image
src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py

@@ -250,7 +250,7 @@ class VersatileDiffusionTextToImagePipeline(DiffusionPipeline):
         latents = 1 / self.vae.config.scaling_factor * latents
         image = self.vae.decode(latents).sample
         image = (image / 2 + 0.5).clamp(0, 1)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
         return image
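For context, a hedged usage sketch of how one of the patched pipelines might be run in bfloat16, which is the code path the corrected comments refer to (the model id, prompt, and device are illustrative, not part of the commit):

import torch
from diffusers import StableDiffusionPipeline

# Loading the pipeline in bfloat16 exercises the decode path patched above,
# where the decoded image is cast back to float32 before the NumPy conversion.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.bfloat16
)
pipe = pipe.to("cuda")
image = pipe("a photo of an astronaut riding a horse on the moon").images[0]
image.save("astronaut.png")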