renzhc/diffusers_dcu · Commit 18200240
Authored Jul 18, 2022 by anton-l · parent ffe7b93b

Make tqdm calls notebook-compatible
Showing 6 changed files with 12 additions and 12 deletions.
src/diffusers/pipelines/ddim/pipeline_ddim.py  +2 -2
src/diffusers/pipelines/ddpm/pipeline_ddpm.py  +2 -2
src/diffusers/pipelines/glide/pipeline_glide.py  +2 -2
src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py  +2 -2
src/diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py  +2 -2
src/diffusers/pipelines/pndm/pipeline_pndm.py  +2 -2
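The change is mechanical and identical in all six pipelines: import tqdm becomes from tqdm.auto import tqdm, and each tqdm.tqdm(...) call becomes tqdm(...). tqdm.auto resolves to the ipywidgets-based notebook progress bar when the code runs inside a Jupyter/IPython kernel and falls back to the standard terminal bar otherwise, so the same denoising loop renders cleanly in both environments. A minimal sketch of the before/after pattern (the loop body is a placeholder, not the pipeline code):

    # Before: always the terminal bar, which prints one new line
    # per refresh when rendered inside a notebook.
    import tqdm

    for t in tqdm.tqdm(range(1000)):
        pass  # denoising step goes here

    # After: tqdm.auto picks the notebook widget bar under Jupyter,
    # and the plain terminal bar everywhere else.
    from tqdm.auto import tqdm

    for t in tqdm(range(1000)):
        pass  # denoising step goes here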
src/diffusers/pipelines/ddim/pipeline_ddim.py

@@ -16,7 +16,7 @@
 import torch
-import tqdm
+from tqdm.auto import tqdm
 from ...pipeline_utils import DiffusionPipeline

@@ -44,7 +44,7 @@ class DDIMPipeline(DiffusionPipeline):
         # set step values
         self.scheduler.set_timesteps(num_inference_steps)
-        for t in tqdm.tqdm(self.scheduler.timesteps):
+        for t in tqdm(self.scheduler.timesteps):
             # 1. predict noise model_output
             with torch.no_grad():
                 model_output = self.unet(image, t)
src/diffusers/pipelines/ddpm/pipeline_ddpm.py

@@ -16,7 +16,7 @@
 import torch
-import tqdm
+from tqdm.auto import tqdm
 from ...pipeline_utils import DiffusionPipeline

@@ -41,7 +41,7 @@ class DDPMPipeline(DiffusionPipeline):
         image = image.to(torch_device)
         num_prediction_steps = len(self.scheduler)
-        for t in tqdm.tqdm(reversed(range(num_prediction_steps)), total=num_prediction_steps):
+        for t in tqdm(reversed(range(num_prediction_steps)), total=num_prediction_steps):
             # 1. predict noise model_output
             with torch.no_grad():
                 model_output = self.unet(image, t)
src/diffusers/pipelines/glide/pipeline_glide.py

@@ -22,7 +22,7 @@ import torch
 import torch.utils.checkpoint
 from torch import nn
-import tqdm
+from tqdm.auto import tqdm
 from transformers import CLIPConfig, CLIPModel, CLIPTextConfig, CLIPVisionConfig, GPT2Tokenizer
 from transformers.activations import ACT2FN
 from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling

@@ -778,7 +778,7 @@ class GlidePipeline(DiffusionPipeline):
         # 3. Run the text2image generation step
         num_prediction_steps = len(self.text_scheduler)
-        for t in tqdm.tqdm(reversed(range(num_prediction_steps)), total=num_prediction_steps):
+        for t in tqdm(reversed(range(num_prediction_steps)), total=num_prediction_steps):
             with torch.no_grad():
                 time_input = torch.tensor([t] * image.shape[0], device=torch_device)
                 model_output = text_model_fn(image, time_input, transformer_out)
src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py

@@ -5,7 +5,7 @@ import torch
 import torch.nn as nn
 import torch.utils.checkpoint
-import tqdm
+from tqdm.auto import tqdm
 from transformers.activations import ACT2FN
 from transformers.configuration_utils import PretrainedConfig
 from transformers.modeling_outputs import BaseModelOutput

@@ -599,7 +599,7 @@ class LatentDiffusionPipeline(DiffusionPipeline):
         # - eta -> η
         # - pred_image_direction -> "direction pointing to x_t"
         # - pred_prev_image -> "x_t-1"
-        for t in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
+        for t in tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
             # guidance_scale of 1 means no guidance
             if guidance_scale == 1.0:
                 image_in = image
src/diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py

 import torch
-import tqdm
+from tqdm.auto import tqdm
 from ...pipeline_utils import DiffusionPipeline

@@ -35,7 +35,7 @@ class LatentDiffusionUncondPipeline(DiffusionPipeline):
         self.scheduler.set_timesteps(num_inference_steps)
-        for t in tqdm.tqdm(self.scheduler.timesteps):
+        for t in tqdm(self.scheduler.timesteps):
             with torch.no_grad():
                 model_output = self.unet(image, t)
src/diffusers/pipelines/pndm/pipeline_pndm.py

@@ -16,7 +16,7 @@
 import torch
-import tqdm
+from tqdm.auto import tqdm
 from ...pipeline_utils import DiffusionPipeline

@@ -43,7 +43,7 @@ class PNDMPipeline(DiffusionPipeline):
         image = image.to(torch_device)
         prk_time_steps = self.scheduler.get_prk_time_steps(num_inference_steps)
-        for t in tqdm.tqdm(range(len(prk_time_steps))):
+        for t in tqdm(range(len(prk_time_steps))):
             t_orig = prk_time_steps[t]
             model_output = self.unet(image, t_orig)
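A quick way to confirm which implementation tqdm.auto resolved to in the current environment (a sketch; the exact __module__ strings are what recent tqdm releases report and may vary by version):

    from tqdm.auto import tqdm

    # Typically prints 'tqdm.notebook' inside a Jupyter kernel with
    # ipywidgets installed, and 'tqdm.std' in a plain terminal.
    print(tqdm.__module__)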