fengzch-das / nunchaku · Commits · c00fb4b7

Commit c00fb4b7, authored Apr 12, 2025 by muyangli
finalize the ci
Parent: 3ecf0d25

Showing 7 changed files with 27 additions and 27 deletions (+27 -27)
tests/flux/test_flux_cache.py       +1  -1
tests/flux/test_flux_dev.py         +2  -2
tests/flux/test_flux_dev_loras.py   +7  -7
tests/flux/test_flux_schnell.py     +4  -4
tests/flux/test_flux_tools.py       +11 -11
tests/flux/test_shuttle_jaguar.py   +1  -1
tests/flux/utils.py                 +1  -1
tests/flux/test_flux_cache.py (+1 -1)

@@ -8,7 +8,7 @@ from .utils import run_test
 @pytest.mark.parametrize(
     "cache_threshold,height,width,num_inference_steps,lora_name,lora_strength,expected_lpips",
     [
-        (0.12, 1024, 1024, 30, None, 1, 0.26),
+        (0.12, 1024, 1024, 30, None, 1, 0.212),
     ],
 )
 def test_flux_dev_loras(
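The body of test_flux_dev_loras is collapsed in this view. As a rough sketch of how such a parametrized test presumably forwards its arguments to the shared run_test helper from tests/flux/utils.py (the signature follows the parametrize string above; the forwarding itself is an assumption based on the other tests in this commit, not the verbatim source):

# Hypothetical reconstruction of the collapsed test body in tests/flux/test_flux_cache.py.
# The parameter names mirror the parametrize string; passing them straight through to
# run_test (tests/flux/utils.py) is assumed, not copied from the repository.
import pytest

from .utils import run_test


@pytest.mark.parametrize(
    "cache_threshold,height,width,num_inference_steps,lora_name,lora_strength,expected_lpips",
    [(0.12, 1024, 1024, 30, None, 1, 0.212)],
)
def test_flux_dev_loras(
    cache_threshold: float,
    height: int,
    width: int,
    num_inference_steps: int,
    lora_name: str | None,
    lora_strength: float,
    expected_lpips: float,
):
    run_test(
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        cache_threshold=cache_threshold,
        lora_names=lora_name,
        lora_strengths=lora_strength,
        expected_lpips=expected_lpips,
    )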
tests/flux/test_flux_dev.py (+2 -2)

@@ -8,8 +8,8 @@ from .utils import run_test
 @pytest.mark.parametrize(
     "height,width,num_inference_steps,attention_impl,cpu_offload,expected_lpips",
     [
-        (1024, 1024, 50, "flashattn2", False, 0.226),
-        (2048, 512, 25, "nunchaku-fp16", False, 0.243),
+        (1024, 1024, 50, "flashattn2", False, 0.139),
+        (2048, 512, 25, "nunchaku-fp16", False, 0.148),
     ],
 )
 def test_flux_dev(
tests/flux/test_flux_dev_loras.py (+7 -7)

@@ -8,10 +8,10 @@ from .utils import run_test
 @pytest.mark.parametrize(
     "num_inference_steps,lora_name,lora_strength,cpu_offload,expected_lpips",
     [
-        (25, "realism", 0.9, True, 0.178),
-        (25, "ghibsky", 1, False, 0.164),
+        (25, "realism", 0.9, True, 0.136),
+        (25, "ghibsky", 1, False, 0.186),
         # (28, "anime", 1, False, 0.284),
-        (24, "sketch", 1, True, 0.223),
+        (24, "sketch", 1, True, 0.260),
         # (28, "yarn", 1, False, 0.211),
         # (25, "haunted_linework", 1, True, 0.317),
     ],

@@ -51,7 +51,7 @@ def test_flux_dev_hypersd8_1536x2048():
         lora_names="hypersd8",
         lora_strengths=0.125,
         cache_threshold=0,
-        expected_lpips=0.291,
+        expected_lpips=0.164,
     )

@@ -71,7 +71,7 @@ def test_flux_dev_turbo8_1024x1920():
         lora_names="turbo8",
         lora_strengths=1,
         cache_threshold=0,
-        expected_lpips=0.189,
+        expected_lpips=0.120,
     )

@@ -91,7 +91,7 @@ def test_flux_dev_turbo8_yarn_2048x1024():
         lora_names=["turbo8", "yarn"],
         lora_strengths=[1, 1],
         cache_threshold=0,
-        expected_lpips=0.252,
+        expected_lpips=0.255,
     )

@@ -111,5 +111,5 @@ def test_flux_dev_turbo8_yarn_1024x1024():
         lora_names=["realism", "ghibsky", "anime", "sketch", "yarn", "haunted_linework", "turbo8"],
         lora_strengths=[0, 0, 0, 0, 0, 1, 1],
         cache_threshold=0,
-        expected_lpips=0.44,
+        expected_lpips=0.310,
     )
tests/flux/test_flux_schnell.py (+4 -4)

@@ -8,10 +8,10 @@ from .utils import run_test
 @pytest.mark.parametrize(
     "height,width,attention_impl,cpu_offload,expected_lpips",
     [
-        (1024, 1024, "flashattn2", False, 0.250),
-        (1024, 1024, "nunchaku-fp16", False, 0.255),
-        (1920, 1080, "nunchaku-fp16", False, 0.253),
-        (2048, 2048, "nunchaku-fp16", True, 0.274),
+        (1024, 1024, "flashattn2", False, 0.126),
+        (1024, 1024, "nunchaku-fp16", False, 0.126),
+        (1920, 1080, "nunchaku-fp16", False, 0.141),
+        (2048, 2048, "nunchaku-fp16", True, 0.166),
     ],
 )
 def test_int4_schnell(height: int, width: int, attention_impl: str, cpu_offload: bool, expected_lpips: float):
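Each tuple in a parametrize list above becomes its own test case. As a small usage note, a single file's cases can also be launched programmatically instead of from the shell; a minimal sketch, assuming the repository root is the working directory and the test dependencies are installed:

# Roughly equivalent to running: pytest tests/flux/test_flux_schnell.py -k test_int4_schnell -v
import sys

import pytest

sys.exit(pytest.main(["tests/flux/test_flux_schnell.py", "-k", "test_int4_schnell", "-v"]))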
tests/flux/test_flux_tools.py (+11 -11)

@@ -15,12 +15,12 @@ def test_flux_canny_dev():
         dtype=torch.bfloat16,
         height=1024,
         width=1024,
-        num_inference_steps=50,
+        num_inference_steps=30,
         guidance_scale=30,
         attention_impl="nunchaku-fp16",
         cpu_offload=False,
         cache_threshold=0,
-        expected_lpips=0.103 if get_precision() == "int4" else 0.164,
+        expected_lpips=0.076 if get_precision() == "int4" else 0.164,
     )

@@ -39,7 +39,7 @@ def test_flux_depth_dev():
         attention_impl="nunchaku-fp16",
         cpu_offload=False,
         cache_threshold=0,
-        expected_lpips=0.170 if get_precision() == "int4" else 0.120,
+        expected_lpips=0.137 if get_precision() == "int4" else 0.120,
     )

@@ -53,12 +53,12 @@ def test_flux_fill_dev():
         dtype=torch.bfloat16,
         height=1024,
         width=1024,
-        num_inference_steps=50,
+        num_inference_steps=30,
         guidance_scale=30,
         attention_impl="nunchaku-fp16",
         cpu_offload=False,
         cache_threshold=0,
-        expected_lpips=0.045,
+        expected_lpips=0.046,
     )

@@ -72,14 +72,14 @@ def test_flux_dev_canny_lora():
         dtype=torch.bfloat16,
         height=1024,
         width=1024,
-        num_inference_steps=50,
+        num_inference_steps=30,
         guidance_scale=30,
         attention_impl="nunchaku-fp16",
         cpu_offload=False,
         lora_names="canny",
         lora_strengths=0.85,
         cache_threshold=0,
-        expected_lpips=0.103,
+        expected_lpips=0.081,
     )

@@ -100,7 +100,7 @@ def test_flux_dev_depth_lora():
         cache_threshold=0,
         lora_names="depth",
         lora_strengths=0.85,
-        expected_lpips=0.163,
+        expected_lpips=0.181,
     )

@@ -121,7 +121,7 @@ def test_flux_fill_dev_turbo():
         cache_threshold=0,
         lora_names="turbo8",
         lora_strengths=1,
-        expected_lpips=0.048,
+        expected_lpips=0.036,
     )

@@ -135,10 +135,10 @@ def test_flux_dev_redux():
         dtype=torch.bfloat16,
         height=1024,
         width=1024,
-        num_inference_steps=50,
+        num_inference_steps=20,
         guidance_scale=2.5,
         attention_impl="nunchaku-fp16",
         cpu_offload=False,
         cache_threshold=0,
-        expected_lpips=(0.198 if get_precision() == "int4" else 0.198),
+        expected_lpips=(0.143 if get_precision() == "int4" else 0.198),
     )
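Several thresholds in this file are chosen per quantization precision via get_precision() from nunchaku.utils. Isolated from the test, the pattern reads as below (values taken from the first hunk of this file; what get_precision() returns for non-int4 builds is not shown in this diff):

# Precision-dependent LPIPS tolerance, as used in tests/flux/test_flux_tools.py:
# the int4 path gets the 0.076 bound, any other precision falls back to 0.164.
from nunchaku.utils import get_precision

expected_lpips = 0.076 if get_precision() == "int4" else 0.164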
tests/flux/test_shuttle_jaguar.py (+1 -1)

@@ -6,7 +6,7 @@ from nunchaku.utils import get_precision, is_turing
 @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
 @pytest.mark.parametrize(
-    "height,width,attention_impl,cpu_offload,expected_lpips", [(1024, 1024, "nunchaku-fp16", False, 0.25)]
+    "height,width,attention_impl,cpu_offload,expected_lpips", [(1024, 1024, "nunchaku-fp16", False, 0.186)]
 )
 def test_shuttle_jaguar(height: int, width: int, attention_impl: str, cpu_offload: bool, expected_lpips: float):
     run_test(
tests/flux/utils.py (+1 -1)

@@ -134,7 +134,7 @@ def run_test(
     cache_threshold: float = 0,
     lora_names: str | list[str] | None = None,
     lora_strengths: float | list[float] = 1.0,
-    max_dataset_size: int = 8,
+    max_dataset_size: int = 4,
     i2f_mode: str | None = None,
     expected_lpips: float = 0.5,
 ):
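All of the expected_lpips values in this commit feed run_test, which, judging by its parameters, generates images and scores them against reference outputs with an LPIPS metric, failing the test when the distance is too large. A minimal sketch of that kind of threshold check, using the lpips package; the actual comparison inside run_test may differ:

# Illustrative LPIPS threshold check, not the verbatim run_test implementation.
# lpips.LPIPS expects (N, 3, H, W) tensors scaled to [-1, 1].
import lpips
import torch


def assert_lpips_below(generated: torch.Tensor, reference: torch.Tensor, expected_lpips: float) -> None:
    loss_fn = lpips.LPIPS(net="alex")
    with torch.no_grad():
        distance = loss_fn(generated, reference).mean().item()
    assert distance < expected_lpips, (
        f"LPIPS {distance:.3f} exceeds the expected threshold {expected_lpips:.3f}"
    )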