renzhc / diffusers_dcu - Commits

Commit 2b31740d
Authored Nov 20, 2022 by Patrick von Platen

Merge branch 'main' of https://github.com/huggingface/diffusers

Parents: 63b34191, 3bec90ff

Changes: 24. This page shows 4 changed files with 325 additions and 0 deletions (+325, -0).
Files shown on this page:

- src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py (+15, -0)
- tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint_legacy.py (+95, -0)
- tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py (+211, -0)
- utils/check_copies.py (+4, -0)
src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py
@@ -34,6 +34,21 @@ class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
         requires_backends(cls, ["torch", "transformers", "onnx"])
 
 
+class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
+    _backends = ["torch", "transformers", "onnx"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch", "transformers", "onnx"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers", "onnx"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers", "onnx"])
+
+
 class OnnxStableDiffusionPipeline(metaclass=DummyObject):
     _backends = ["torch", "transformers", "onnx"]
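These dummy classes keep "import diffusers" working when the optional ONNX backend is missing: the real pipeline class is replaced by a placeholder whose every use raises an informative error. A minimal, self-contained sketch of the pattern follows; DummyObject and requires_backends are the real diffusers utility names, but the simplified bodies and the _backend_available helper are assumptions for illustration, not the library's exact code.

    import importlib.util


    def _backend_available(name):
        # Assumed helper: treat a backend as installed if its module can be found.
        return importlib.util.find_spec(name) is not None


    def requires_backends(obj, backends):
        # Raise an informative ImportError listing every missing backend.
        missing = [b for b in backends if not _backend_available(b)]
        if missing:
            name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
            raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")


    class DummyObject(type):
        # Metaclass: looking up a missing attribute on a dummy pipeline class
        # re-runs the backend check, so users only hit the missing-dependency
        # error when they actually try to use the class.
        def __getattr__(cls, key):
            requires_backends(cls, cls._backends)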
tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint_legacy.py (new file, 0 → 100644)
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    require_onnxruntime,
    require_torch_gpu,
    slow,
)


if is_onnx_available():
    import onnxruntime as ort


@slow
@require_onnxruntime
@require_torch_gpu
class StableDiffusionOnnxInpaintLegacyPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            init_image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
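The decorators from diffusers.utils.testing_utils gate this integration test: slow skips it unless slow tests are explicitly enabled, while require_onnxruntime and require_torch_gpu skip it when onnxruntime or a CUDA device is unavailable. A rough sketch of how such guards are commonly built is shown below; the environment-variable name and the helper bodies are assumptions, not diffusers' actual implementation.

    import importlib.util
    import os
    import unittest

    import torch


    def sketch_slow(test_case):
        # Assumed convention: slow tests only run when explicitly opted in.
        run_slow = os.environ.get("RUN_SLOW", "0") == "1"
        return unittest.skipUnless(run_slow, "slow test; set RUN_SLOW=1 to run")(test_case)


    def sketch_require_onnxruntime(test_case):
        has_ort = importlib.util.find_spec("onnxruntime") is not None
        return unittest.skipUnless(has_ort, "test requires onnxruntime")(test_case)


    def sketch_require_torch_gpu(test_case):
        return unittest.skipUnless(torch.cuda.is_available(), "test requires a CUDA GPU")(test_case)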
tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
@@ -22,13 +22,17 @@ import torch
 
 from diffusers import (
     AutoencoderKL,
+    LMSDiscreteScheduler,
     PNDMScheduler,
     StableDiffusionInpaintPipeline,
     UNet2DConditionModel,
     UNet2DModel,
     VQModel,
 )
 from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
+from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image
 from diffusers.utils.testing_utils import require_torch_gpu
 from PIL import Image
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
@@ -421,6 +425,45 @@ class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
 
         assert image.shape == (512, 512, 3)
         assert np.abs(expected_image - image).max() < 1e-2
 
+    def test_stable_diffusion_inpaint_pipeline_k_lms(self):
+        init_image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
+            "/in_paint/overture-creations-5sI6fQgYIuo.png"
+        )
+        mask_image = load_image(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
+            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
+        )
+        expected_image = load_numpy(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/in_paint"
+            "/yellow_cat_sitting_on_a_park_bench_k_lms.npy"
+        )
+
+        model_id = "runwayml/stable-diffusion-inpainting"
+        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
+        pipe.to(torch_device)
+
+        # switch to LMS
+        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
+
+        pipe.set_progress_bar_config(disable=None)
+        pipe.enable_attention_slicing()
+
+        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
+
+        generator = torch.Generator(device=torch_device).manual_seed(0)
+        output = pipe(
+            prompt=prompt,
+            image=init_image,
+            mask_image=mask_image,
+            generator=generator,
+            output_type="np",
+        )
+        image = output.images[0]
+
+        assert image.shape == (512, 512, 3)
+        assert np.abs(expected_image - image).max() < 1e-2
+
     @unittest.skipIf(torch_device == "cpu", "This test is supposed to run on GPU")
     def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
         torch.cuda.empty_cache()
@@ -466,3 +509,171 @@ class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
 
         mem_bytes = torch.cuda.max_memory_allocated()
         # make sure that less than 2.2 GB is allocated
         assert mem_bytes < 2.2 * 10**9
+
+
+class StableDiffusionInpaintingPrepareMaskAndMaskedImageTests(unittest.TestCase):
+    def test_pil_inputs(self):
+        im = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)
+        im = Image.fromarray(im)
+        mask = np.random.randint(0, 255, (32, 32), dtype=np.uint8) > 127.5
+        mask = Image.fromarray((mask * 255).astype(np.uint8))
+
+        t_mask, t_masked = prepare_mask_and_masked_image(im, mask)
+
+        self.assertTrue(isinstance(t_mask, torch.Tensor))
+        self.assertTrue(isinstance(t_masked, torch.Tensor))
+
+        self.assertEqual(t_mask.ndim, 4)
+        self.assertEqual(t_masked.ndim, 4)
+
+        self.assertEqual(t_mask.shape, (1, 1, 32, 32))
+        self.assertEqual(t_masked.shape, (1, 3, 32, 32))
+
+        self.assertTrue(t_mask.dtype == torch.float32)
+        self.assertTrue(t_masked.dtype == torch.float32)
+
+        self.assertTrue(t_mask.min() >= 0.0)
+        self.assertTrue(t_mask.max() <= 1.0)
+        self.assertTrue(t_masked.min() >= -1.0)
+        self.assertTrue(t_masked.min() <= 1.0)
+
+        self.assertTrue(t_mask.sum() > 0.0)
+
+    def test_np_inputs(self):
+        im_np = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)
+        im_pil = Image.fromarray(im_np)
+        mask_np = np.random.randint(0, 255, (32, 32), dtype=np.uint8) > 127.5
+        mask_pil = Image.fromarray((mask_np * 255).astype(np.uint8))
+
+        t_mask_np, t_masked_np = prepare_mask_and_masked_image(im_np, mask_np)
+        t_mask_pil, t_masked_pil = prepare_mask_and_masked_image(im_pil, mask_pil)
+
+        self.assertTrue((t_mask_np == t_mask_pil).all())
+        self.assertTrue((t_masked_np == t_masked_pil).all())
+
+    def test_torch_3D_2D_inputs(self):
+        im_tensor = torch.randint(0, 255, (3, 32, 32), dtype=torch.uint8)
+        mask_tensor = torch.randint(0, 255, (32, 32), dtype=torch.uint8) > 127.5
+        im_np = im_tensor.numpy().transpose(1, 2, 0)
+        mask_np = mask_tensor.numpy()
+
+        t_mask_tensor, t_masked_tensor = prepare_mask_and_masked_image(im_tensor / 127.5 - 1, mask_tensor)
+        t_mask_np, t_masked_np = prepare_mask_and_masked_image(im_np, mask_np)
+
+        self.assertTrue((t_mask_tensor == t_mask_np).all())
+        self.assertTrue((t_masked_tensor == t_masked_np).all())
+
+    def test_torch_3D_3D_inputs(self):
+        im_tensor = torch.randint(0, 255, (3, 32, 32), dtype=torch.uint8)
+        mask_tensor = torch.randint(0, 255, (1, 32, 32), dtype=torch.uint8) > 127.5
+        im_np = im_tensor.numpy().transpose(1, 2, 0)
+        mask_np = mask_tensor.numpy()[0]
+
+        t_mask_tensor, t_masked_tensor = prepare_mask_and_masked_image(im_tensor / 127.5 - 1, mask_tensor)
+        t_mask_np, t_masked_np = prepare_mask_and_masked_image(im_np, mask_np)
+
+        self.assertTrue((t_mask_tensor == t_mask_np).all())
+        self.assertTrue((t_masked_tensor == t_masked_np).all())
+
+    def test_torch_4D_2D_inputs(self):
+        im_tensor = torch.randint(0, 255, (1, 3, 32, 32), dtype=torch.uint8)
+        mask_tensor = torch.randint(0, 255, (32, 32), dtype=torch.uint8) > 127.5
+        im_np = im_tensor.numpy()[0].transpose(1, 2, 0)
+        mask_np = mask_tensor.numpy()
+
+        t_mask_tensor, t_masked_tensor = prepare_mask_and_masked_image(im_tensor / 127.5 - 1, mask_tensor)
+        t_mask_np, t_masked_np = prepare_mask_and_masked_image(im_np, mask_np)
+
+        self.assertTrue((t_mask_tensor == t_mask_np).all())
+        self.assertTrue((t_masked_tensor == t_masked_np).all())
+
+    def test_torch_4D_3D_inputs(self):
+        im_tensor = torch.randint(0, 255, (1, 3, 32, 32), dtype=torch.uint8)
+        mask_tensor = torch.randint(0, 255, (1, 32, 32), dtype=torch.uint8) > 127.5
+        im_np = im_tensor.numpy()[0].transpose(1, 2, 0)
+        mask_np = mask_tensor.numpy()[0]
+
+        t_mask_tensor, t_masked_tensor = prepare_mask_and_masked_image(im_tensor / 127.5 - 1, mask_tensor)
+        t_mask_np, t_masked_np = prepare_mask_and_masked_image(im_np, mask_np)
+
+        self.assertTrue((t_mask_tensor == t_mask_np).all())
+        self.assertTrue((t_masked_tensor == t_masked_np).all())
+
+    def test_torch_4D_4D_inputs(self):
+        im_tensor = torch.randint(0, 255, (1, 3, 32, 32), dtype=torch.uint8)
+        mask_tensor = torch.randint(0, 255, (1, 1, 32, 32), dtype=torch.uint8) > 127.5
+        im_np = im_tensor.numpy()[0].transpose(1, 2, 0)
+        mask_np = mask_tensor.numpy()[0][0]
+
+        t_mask_tensor, t_masked_tensor = prepare_mask_and_masked_image(im_tensor / 127.5 - 1, mask_tensor)
+        t_mask_np, t_masked_np = prepare_mask_and_masked_image(im_np, mask_np)
+
+        self.assertTrue((t_mask_tensor == t_mask_np).all())
+        self.assertTrue((t_masked_tensor == t_masked_np).all())
+
+    def test_torch_batch_4D_3D(self):
+        im_tensor = torch.randint(0, 255, (2, 3, 32, 32), dtype=torch.uint8)
+        mask_tensor = torch.randint(0, 255, (2, 32, 32), dtype=torch.uint8) > 127.5
+
+        im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor]
+        mask_nps = [mask.numpy() for mask in mask_tensor]
+
+        t_mask_tensor, t_masked_tensor = prepare_mask_and_masked_image(im_tensor / 127.5 - 1, mask_tensor)
+        nps = [prepare_mask_and_masked_image(i, m) for i, m in zip(im_nps, mask_nps)]
+        t_mask_np = torch.cat([n[0] for n in nps])
+        t_masked_np = torch.cat([n[1] for n in nps])
+
+        self.assertTrue((t_mask_tensor == t_mask_np).all())
+        self.assertTrue((t_masked_tensor == t_masked_np).all())
+
+    def test_torch_batch_4D_4D(self):
+        im_tensor = torch.randint(0, 255, (2, 3, 32, 32), dtype=torch.uint8)
+        mask_tensor = torch.randint(0, 255, (2, 1, 32, 32), dtype=torch.uint8) > 127.5
+
+        im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor]
+        mask_nps = [mask.numpy()[0] for mask in mask_tensor]
+
+        t_mask_tensor, t_masked_tensor = prepare_mask_and_masked_image(im_tensor / 127.5 - 1, mask_tensor)
+        nps = [prepare_mask_and_masked_image(i, m) for i, m in zip(im_nps, mask_nps)]
+        t_mask_np = torch.cat([n[0] for n in nps])
+        t_masked_np = torch.cat([n[1] for n in nps])
+
+        self.assertTrue((t_mask_tensor == t_mask_np).all())
+        self.assertTrue((t_masked_tensor == t_masked_np).all())
+
+    def test_shape_mismatch(self):
+        # test height and width
+        with self.assertRaises(AssertionError):
+            prepare_mask_and_masked_image(torch.randn(3, 32, 32), torch.randn(64, 64))
+        # test batch dim
+        with self.assertRaises(AssertionError):
+            prepare_mask_and_masked_image(torch.randn(2, 3, 32, 32), torch.randn(4, 64, 64))
+        # test batch dim
+        with self.assertRaises(AssertionError):
+            prepare_mask_and_masked_image(torch.randn(2, 3, 32, 32), torch.randn(4, 1, 64, 64))
+
+    def test_type_mismatch(self):
+        # test tensors-only
+        with self.assertRaises(TypeError):
+            prepare_mask_and_masked_image(torch.rand(3, 32, 32), torch.rand(3, 32, 32).numpy())
+        # test tensors-only
+        with self.assertRaises(TypeError):
+            prepare_mask_and_masked_image(torch.rand(3, 32, 32).numpy(), torch.rand(3, 32, 32))
+
+    def test_channels_first(self):
+        # test channels first for 3D tensors
+        with self.assertRaises(AssertionError):
+            prepare_mask_and_masked_image(torch.rand(32, 32, 3), torch.rand(3, 32, 32))
+
+    def test_tensor_range(self):
+        # test im <= 1
+        with self.assertRaises(ValueError):
+            prepare_mask_and_masked_image(torch.ones(3, 32, 32) * 2, torch.rand(32, 32))
+        # test im >= -1
+        with self.assertRaises(ValueError):
+            prepare_mask_and_masked_image(torch.ones(3, 32, 32) * (-2), torch.rand(32, 32))
+        # test mask <= 1
+        with self.assertRaises(ValueError):
+            prepare_mask_and_masked_image(torch.rand(3, 32, 32), torch.ones(32, 32) * 2)
+        # test mask >= 0
+        with self.assertRaises(ValueError):
+            prepare_mask_and_masked_image(torch.rand(3, 32, 32), torch.ones(32, 32) * -1)
\ No newline at end of file
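Taken together, the new StableDiffusionInpaintingPrepareMaskAndMaskedImageTests pin down the contract of prepare_mask_and_masked_image: PIL, NumPy, and torch inputs must produce identical results; the masked image comes back as a float32 (N, 3, H, W) tensor in [-1, 1]; the mask comes back as a float32 (N, 1, H, W) tensor in [0, 1]; and mismatched shapes, mixed input types, channels-last 3D tensors, and out-of-range values are rejected. The sketch below illustrates that contract for PIL inputs only; it is inferred from the assertions above (in particular, zeroing out the region to be repainted is an assumed convention), not the function's actual implementation in pipeline_stable_diffusion_inpaint.py.

    import numpy as np
    import torch
    from PIL import Image


    def prepare_mask_and_masked_image_sketch(image: Image.Image, mask: Image.Image):
        # Image: HWC uint8 -> (1, 3, H, W) float32 in [-1, 1]
        image_t = torch.from_numpy(np.array(image.convert("RGB"))).float()
        image_t = image_t.permute(2, 0, 1).unsqueeze(0) / 127.5 - 1.0

        # Mask: HW uint8 -> (1, 1, H, W) float32, binarized to {0, 1}
        mask_t = torch.from_numpy(np.array(mask.convert("L"))).float() / 255.0
        mask_t = (mask_t >= 0.5).float().unsqueeze(0).unsqueeze(0)

        # Masked image: keep pixels outside the mask, zero the region to repaint
        # (assumed convention consistent with the shape/range assertions above).
        masked_image = image_t * (mask_t < 0.5)
        return mask_t, masked_image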
utils/check_copies.py
@@ -153,6 +153,10 @@ def is_copy_consistent(filename, overwrite=False):
 
         observed_code_lines = lines[start_index:line_index]
         observed_code = "".join(observed_code_lines)
 
+        # Remove any nested `Copied from` comments to avoid circular copies
+        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
+        theoretical_code = "\n".join(theoretical_code)
+
         # Before comparing, use the `replace_pattern` on the original code.
         if len(replace_pattern) > 0:
             patterns = replace_pattern.replace("with", "").split(",")
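The four added lines make check_copies.py strip nested "Copied from" markers out of the theoretical (source) code before comparing it against the copy, so a copied-from object that itself contains copy markers no longer causes circular or spurious mismatches. For illustration, a source class along these lines (hypothetical names, assuming the standard "# Copied from diffusers...." comment format matched by _re_copy_warning) would previously have leaked its inner marker into the comparison:

    class SourceScheduler:
        # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input
        def scale_model_input(self, sample, timestep=None):
            # After this commit, the nested "Copied from" line above is removed from
            # the theoretical code, so copies of SourceScheduler are checked against
            # the plain method body only.
            return sample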