renzhc / diffusers_dcu · Commit 60892c55 (Unverified)

enable marigold_intrinsics cases on XPU (#11445)

Authored Apr 30, 2025 by Yao Matrix, committed by GitHub on Apr 30, 2025
Signed-off-by: Yao Matrix <matrix.yao@intel.com>
Parent: 8fe5a14d

Showing 1 changed file with 22 additions and 21 deletions (+22 −21)

tests/pipelines/marigold/test_marigold_intrinsics.py (view file @ 60892c55)

@@ -33,10 +33,11 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     load_image,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
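
For context on the new imports: `torch_device` is the device string that diffusers' test utilities resolve for the current machine, and `require_torch_accelerator` skips a test when no supported accelerator is present, which is what lets the hard-coded "cuda" references below be dropped. The sketch here is an illustration only of how such a device string can be resolved; the real logic lives in `diffusers.utils.testing_utils` and also honors an environment-variable override, and `_resolve_test_device` is a hypothetical helper name.

# Hedged sketch only: illustrates how a device-agnostic test device string can
# be resolved; the actual resolution is done inside diffusers.utils.testing_utils.
import torch

def _resolve_test_device() -> str:  # hypothetical helper, not a diffusers API
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return "xpu"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"

torch_device = _resolve_test_device()
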
@@ -395,17 +396,17 @@ class MarigoldIntrinsicsPipelineFastTests(MarigoldIntrinsicsPipelineTesterMixin,
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def _test_marigold_intrinsics(
         self,
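
The switch from `torch.cuda.empty_cache()` to `backend_empty_cache(torch_device)` makes the cache clearing in `setUp`/`tearDown` backend-aware. Below is a minimal sketch of this kind of dispatch; it is an assumption-laden stand-in for the real `diffusers.utils.testing_utils.backend_empty_cache`, and it assumes the corresponding `empty_cache` functions exist on the installed PyTorch build (e.g. `torch.xpu.empty_cache` requires a recent PyTorch with XPU support).

# Hedged sketch of a device-dispatching cache clear; not the exact diffusers helper.
import torch

def backend_empty_cache_sketch(device: str) -> None:
    if device == "cuda":
        torch.cuda.empty_cache()   # release cached CUDA allocator blocks
    elif device == "xpu":
        torch.xpu.empty_cache()    # Intel GPU counterpart (recent PyTorch builds)
    elif device == "mps":
        torch.mps.empty_cache()    # Apple Silicon counterpart
    # "cpu" has no allocator cache to clear
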
@@ -424,7 +425,7 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase):
             from_pretrained_kwargs["torch_dtype"] = torch.float16

         pipe = MarigoldIntrinsicsPipeline.from_pretrained(model_id, **from_pretrained_kwargs)
-        if device == "cuda":
+        if device in ["cuda", "xpu"]:
             pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
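
Outside the test harness, the same device-aware offload pattern can be applied when loading the pipeline directly. The sketch below is illustrative: the checkpoint id is a placeholder (the real `model_id` is supplied by the test harness and is not shown in this hunk), and falling back to `.to(torch_device)` on other backends is an assumption of this example rather than something the diff does.

# Hedged usage sketch, not taken verbatim from the test file.
import torch
from diffusers import MarigoldIntrinsicsPipeline
from diffusers.utils.testing_utils import torch_device

model_id = "..."  # placeholder checkpoint id; supplied by the harness in the real test
pipe = MarigoldIntrinsicsPipeline.from_pretrained(model_id, torch_dtype=torch.float16)

if torch_device in ["cuda", "xpu"]:
    # Keep submodules on CPU and move them to the accelerator only when needed.
    pipe.enable_model_cpu_offload()
else:
    pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
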
@@ -464,10 +465,10 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase):
             match_input_resolution=True,
         )

-    def test_marigold_intrinsics_einstein_f32_cuda_G0_S1_P768_E1_B1_M1(self):
+    def test_marigold_intrinsics_einstein_f32_accelerator_G0_S1_P768_E1_B1_M1(self):
         self._test_marigold_intrinsics(
             is_fp16=False,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.62127, 0.61906, 0.61687, 0.61946, 0.61903, 0.61961, 0.61808, 0.62099, 0.62894]),
             num_inference_steps=1,
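
Each test case passes a nine-value `expected_slice` reference to `_test_marigold_intrinsics`; the harness body is outside these hunks, so the comparison shown below is a hedged guess at what such a check typically looks like (the slicing window, tolerance, and function name here are assumptions, not code from the file).

# Hedged sketch of an expected-slice check; slicing and tolerance are assumptions.
import numpy as np

def check_expected_slice(prediction: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-4) -> None:
    # Compare a small corner window of the predicted intrinsics map against the reference values.
    actual_slice = prediction[:3, :3].flatten()
    assert actual_slice.shape == expected_slice.shape, "slice shapes must match"
    assert np.allclose(actual_slice, expected_slice, atol=atol), (
        f"slice mismatch: {actual_slice} vs {expected_slice}"
    )

# Example with dummy data standing in for a real pipeline output:
check_expected_slice(np.full((8, 8), 0.62127, dtype=np.float32), np.full(9, 0.62127, dtype=np.float32))
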
@@ -477,10 +478,10 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase):
             match_input_resolution=True,
         )

-    def test_marigold_intrinsics_einstein_f16_cuda_G0_S1_P768_E1_B1_M1(self):
+    def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E1_B1_M1(self):
         self._test_marigold_intrinsics(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.62109, 0.61914, 0.61719, 0.61963, 0.61914, 0.61963, 0.61816, 0.62109, 0.62891]),
             num_inference_steps=1,
@@ -490,10 +491,10 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase):
             match_input_resolution=True,
         )

-    def test_marigold_intrinsics_einstein_f16_cuda_G2024_S1_P768_E1_B1_M1(self):
+    def test_marigold_intrinsics_einstein_f16_accelerator_G2024_S1_P768_E1_B1_M1(self):
         self._test_marigold_intrinsics(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=2024,
             expected_slice=np.array([0.64111, 0.63916, 0.63623, 0.63965, 0.63916, 0.63965, 0.6377, 0.64062, 0.64941]),
             num_inference_steps=1,
@@ -503,10 +504,10 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase):
             match_input_resolution=True,
         )

-    def test_marigold_intrinsics_einstein_f16_cuda_G0_S2_P768_E1_B1_M1(self):
+    def test_marigold_intrinsics_einstein_f16_accelerator_G0_S2_P768_E1_B1_M1(self):
         self._test_marigold_intrinsics(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.60254, 0.60059, 0.59961, 0.60156, 0.60107, 0.60205, 0.60254, 0.60449, 0.61133]),
             num_inference_steps=2,
@@ -516,10 +517,10 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase):
             match_input_resolution=True,
         )

-    def test_marigold_intrinsics_einstein_f16_cuda_G0_S1_P512_E1_B1_M1(self):
+    def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P512_E1_B1_M1(self):
         self._test_marigold_intrinsics(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.64551, 0.64453, 0.64404, 0.64502, 0.64844, 0.65039, 0.64502, 0.65039, 0.65332]),
             num_inference_steps=1,
@@ -529,10 +530,10 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase):
             match_input_resolution=True,
         )

-    def test_marigold_intrinsics_einstein_f16_cuda_G0_S1_P768_E3_B1_M1(self):
+    def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E3_B1_M1(self):
         self._test_marigold_intrinsics(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.61572, 0.61377, 0.61182, 0.61426, 0.61377, 0.61426, 0.61279, 0.61572, 0.62354]),
             num_inference_steps=1,
@@ -543,10 +544,10 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase):
             match_input_resolution=True,
         )

-    def test_marigold_intrinsics_einstein_f16_cuda_G0_S1_P768_E4_B2_M1(self):
+    def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E4_B2_M1(self):
         self._test_marigold_intrinsics(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.61914, 0.6167, 0.61475, 0.61719, 0.61719, 0.61768, 0.61572, 0.61914, 0.62695]),
             num_inference_steps=1,
@@ -557,10 +558,10 @@ class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase):
             match_input_resolution=True,
         )

-    def test_marigold_intrinsics_einstein_f16_cuda_G0_S1_P512_E1_B1_M0(self):
+    def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P512_E1_B1_M0(self):
         self._test_marigold_intrinsics(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.65332, 0.64697, 0.64648, 0.64844, 0.64697, 0.64111, 0.64941, 0.64209, 0.65332]),
             num_inference_steps=1,