Commit f5ca5af6 (renzhc/diffusers_dcu), authored Jul 15, 2022 by Patrick von Platen

add to readme

parent 2ac19ff1
Showing 1 changed file with 22 additions and 75 deletions.
README.md (+22, -75) @ f5ca5af6
Two hunks, `@@ -82,72 +82,22 @@ For more examples see [schedulers](https://github.com/huggingface/diffusers/tree` and `@@ -155,32 +105,29 @@ image = image.to(torch_device)`, rewrite the unconditional-generation examples: the separate DDPM and DDIM snippets are replaced by a single DDIM example. The two hunks are adjacent, so the changed region is shown as one before/after pair.

**Before (old lines 82-186):**

```python
import torch
from diffusers import UNetModel, DDPMScheduler
import PIL
import numpy as np
import tqdm

generator = torch.manual_seed(0)
torch_device = "cuda" if torch.cuda.is_available() else "cpu"

# 1. Load models
noise_scheduler = DDPMScheduler.from_config("fusing/ddpm-lsun-church", tensor_format="pt")
unet = UNetModel.from_pretrained("fusing/ddpm-lsun-church").to(torch_device)

# 2. Sample gaussian noise
image = torch.randn(
    (1, unet.in_channels, unet.resolution, unet.resolution),
    generator=generator,
)
image = image.to(torch_device)

# 3. Denoise
num_prediction_steps = len(noise_scheduler)
for t in tqdm.tqdm(reversed(range(num_prediction_steps)), total=num_prediction_steps):
    # predict noise residual
    with torch.no_grad():
        residual = unet(image, t)

    # predict previous mean of image x_t-1
    pred_prev_image = noise_scheduler.step(residual, image, t)

    # optionally sample variance
    variance = 0
    if t > 0:
        noise = torch.randn(image.shape, generator=generator).to(image.device)
        variance = noise_scheduler.get_variance(t).sqrt() * noise

    # set current image to prev_image: x_t -> x_t-1
    image = pred_prev_image + variance

# 5. process image to PIL
image_processed = image.cpu().permute(0, 2, 3, 1)
image_processed = (image_processed + 1.0) * 127.5
image_processed = image_processed.numpy().astype(np.uint8)
image_pil = PIL.Image.fromarray(image_processed[0])

# 6. save image
image_pil.save("test.png")
```

#### **Example for Unconditonal Image generation [LDM](https://github.com/CompVis/latent-diffusion):**

```python
import torch
from diffusers import UNetModel, DDIMScheduler
import PIL
import numpy as np
import tqdm

generator = torch.manual_seed(0)
torch_device = "cuda" if torch.cuda.is_available() else "cpu"

# 1. Load models
noise_scheduler = DDIMScheduler.from_config("fusing/ddpm-celeba-hq", tensor_format="pt")
unet = UNetModel.from_pretrained("fusing/ddpm-celeba-hq").to(torch_device)

# 2. Sample gaussian noise
image = torch.randn(
    (1, unet.in_channels, unet.resolution, unet.resolution),
    generator=generator,
)
image = image.to(torch_device)

# 3. Denoise
num_inference_steps = 50
eta = 0.0  # <- deterministic sampling

for t in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
    # 1. predict noise residual
    orig_t = len(noise_scheduler) // num_inference_steps * t
    with torch.no_grad():
        residual = unet(image, orig_t)

    # 2. predict previous mean of image x_t-1
    pred_prev_image = noise_scheduler.step(residual, image, t, num_inference_steps, eta)

    # 3. optionally sample variance
    variance = 0
    if eta > 0:
        noise = torch.randn(image.shape, generator=generator).to(image.device)
        variance = noise_scheduler.get_variance(t).sqrt() * eta * noise

    # 4. set current image to prev_image: x_t -> x_t-1
    image = pred_prev_image + variance

# 5. process image to PIL
image_processed = image.cpu().permute(0, 2, 3, 1)
image_processed = (image_processed + 1.0) * 127.5
image_processed = image_processed.numpy().astype(np.uint8)
image_pil = PIL.Image.fromarray(image_processed[0])

# 6. save image
image_pil.save("test.png")
```

**After (new lines 82-133):**

```python
import torch
from diffusers import UNetUnconditionalModel, DDIMScheduler
import PIL.Image
import numpy as np
import tqdm

torch_device = "cuda" if torch.cuda.is_available() else "cpu"

# 1. Load models
scheduler = DDIMScheduler.from_config("fusing/ddpm-celeba-hq", tensor_format="pt")
unet = UNetUnconditionalModel.from_pretrained("fusing/ddpm-celeba-hq", ddpm=True).to(torch_device)

# 2. Sample gaussian noise
generator = torch.manual_seed(23)
unet.image_size = unet.resolution
image = torch.randn(
    (1, unet.in_channels, unet.image_size, unet.image_size),
    generator=generator,
)
image = image.to(torch_device)

# 3. Denoise
num_inference_steps = 50
eta = 0.0  # <- deterministic sampling
scheduler.set_timesteps(num_inference_steps)

for t in tqdm.tqdm(scheduler.timesteps):
    # 1. predict noise residual
    with torch.no_grad():
        residual = unet(image, t)["sample"]

    # 2. predict previous mean of image x_t-1
    prev_image = scheduler.step(residual, t, image, eta)["prev_sample"]

    # 3. set current image to prev_image: x_t -> x_t-1
    image = prev_image

# 4. process image to PIL
image_processed = image.cpu().permute(0, 2, 3, 1)
image_processed = (image_processed + 1.0) * 127.5
image_processed = image_processed.numpy().astype(np.uint8)
image_pil = PIL.Image.fromarray(image_processed[0])

# 5. save image
image_pil.save("generated_image.png")
```

The diff ends with the next section heading, `#### **Example for Unconditonal Image generation [LDM](https://github.com/CompVis/latent-diffusion):**`, whose example code block is still empty at this commit.
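The new example drives the denoising loop through `scheduler.step(residual, t, image, eta)` with `eta = 0.0` for deterministic sampling. For reference, the DDIM update that such a step performs can be sketched in plain PyTorch. This is an illustrative sketch following the DDIM paper (Song et al.), not the `DDIMScheduler` implementation; the function name, argument names, and the `alphas_cumprod` table used here are assumptions made for the example.

```python
import torch

def ddim_step(x_t, eps, alphas_cumprod, t, t_prev, eta=0.0, generator=None):
    """One DDIM update x_t -> x_{t_prev} (illustrative sketch, not the diffusers API).

    x_t: current noisy sample, eps: the model's predicted noise residual,
    alphas_cumprod: 1-D tensor of cumulative products of (1 - beta_t).
    eta = 0.0 gives deterministic DDIM; eta = 1.0 recovers DDPM-like
    stochastic sampling.
    """
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[t_prev] if t_prev >= 0 else torch.tensor(1.0)

    # predicted x_0 recovered from the noise prediction
    x0_pred = (x_t - (1 - a_t).sqrt() * eps) / a_t.sqrt()

    # standard deviation of the injected noise (zero when eta == 0)
    sigma = eta * ((1 - a_prev) / (1 - a_t)).sqrt() * (1 - a_t / a_prev).sqrt()

    # direction pointing back towards x_t
    dir_xt = (1 - a_prev - sigma**2).sqrt() * eps

    noise = sigma * torch.randn(x_t.shape, generator=generator) if eta > 0 else 0.0
    return a_prev.sqrt() * x0_pred + dir_xt + noise


# Toy usage with a linear beta schedule and random stand-ins for the
# image and the model output (placeholders, not a real model).
betas = torch.linspace(1e-4, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
x_t = torch.randn(1, 3, 32, 32)
eps = torch.randn_like(x_t)
x_prev = ddim_step(x_t, eps, alphas_cumprod, t=999, t_prev=979, eta=0.0)
```

With `eta = 0.0` the `sigma` term vanishes, so the trajectory is fully determined by the initial noise, which is what the `# <- deterministic sampling` comment in the example refers to.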