Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
fengzch-das
nunchaku
Commits
007982e7
Commit
007982e7
authored
Apr 15, 2025
by
muyangli
Browse files
update
parent
b353ce5b
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
5 additions
and
64 deletions
+5
-64
tests/flux/test_flux_cache.py
tests/flux/test_flux_cache.py
+1
-1
tests/flux/test_flux_dev_loras.py
tests/flux/test_flux_dev_loras.py
+4
-63
No files found.
tests/flux/test_flux_cache.py
View file @
007982e7
...
...
@@ -11,7 +11,7 @@ from .utils import run_test
(
0.12
,
1024
,
1024
,
30
,
None
,
1
,
0.212
),
],
)
def
test_flux_dev_
loras
(
def
test_flux_dev_
cache
(
cache_threshold
:
float
,
height
:
int
,
width
:
int
,
...
...
tests/flux/test_flux_dev_loras.py
View file @
007982e7
...
...
@@ -9,9 +9,9 @@ from .utils import run_test
"num_inference_steps,lora_name,lora_strength,cpu_offload,expected_lpips"
,
[
(
25
,
"realism"
,
0.9
,
True
,
0.136
),
(
25
,
"ghibsky"
,
1
,
False
,
0.186
),
#
(25, "ghibsky", 1, False, 0.186),
# (28, "anime", 1, False, 0.284),
(
24
,
"sketch"
,
1
,
True
,
0.2
60
),
(
24
,
"sketch"
,
1
,
True
,
0.2
91
),
# (28, "yarn", 1, False, 0.211),
# (25, "haunted_linework", 1, True, 0.317),
],
...
...
@@ -35,68 +35,9 @@ def test_flux_dev_loras(num_inference_steps, lora_name, lora_strength, cpu_offlo
)
# @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
# def test_flux_dev_hypersd8_1536x2048():
# run_test(
# precision=get_precision(),
# model_name="flux.1-dev",
# dataset_name="MJHQ",
# height=1536,
# width=2048,
# num_inference_steps=8,
# guidance_scale=3.5,
# use_qencoder=False,
# attention_impl="nunchaku-fp16",
# cpu_offload=True,
# lora_names="hypersd8",
# lora_strengths=0.125,
# cache_threshold=0,
# expected_lpips=0.164,
# )
@
pytest
.
mark
.
skipif
(
is_turing
(),
reason
=
"Skip tests due to using Turing GPUs"
)
def
test_flux_dev_turbo8_1024x1920
():
run_test
(
precision
=
get_precision
(),
model_name
=
"flux.1-dev"
,
dataset_name
=
"MJHQ"
,
height
=
1024
,
width
=
1920
,
num_inference_steps
=
8
,
guidance_scale
=
3.5
,
use_qencoder
=
False
,
attention_impl
=
"nunchaku-fp16"
,
cpu_offload
=
True
,
lora_names
=
"turbo8"
,
lora_strengths
=
1
,
cache_threshold
=
0
,
expected_lpips
=
0.151
,
)
# @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
# def test_flux_dev_turbo8_yarn_2048x1024():
# run_test(
# precision=get_precision(),
# model_name="flux.1-dev",
# dataset_name="yarn",
# height=2048,
# width=1024,
# num_inference_steps=8,
# guidance_scale=3.5,
# use_qencoder=False,
# cpu_offload=True,
# lora_names=["turbo8", "yarn"],
# lora_strengths=[1, 1],
# cache_threshold=0,
# expected_lpips=0.255,
# )
# lora composition & large rank loras
@
pytest
.
mark
.
skipif
(
is_turing
(),
reason
=
"Skip tests due to using Turing GPUs"
)
def
test_flux_dev_turbo8_
yarn
_1024x1024
():
def
test_flux_dev_turbo8_
ghibsky
_1024x1024
():
run_test
(
precision
=
get_precision
(),
model_name
=
"flux.1-dev"
,
...
...
@@ -108,7 +49,7 @@ def test_flux_dev_turbo8_yarn_1024x1024():
use_qencoder
=
False
,
cpu_offload
=
True
,
lora_names
=
[
"realism"
,
"ghibsky"
,
"anime"
,
"sketch"
,
"yarn"
,
"haunted_linework"
,
"turbo8"
],
lora_strengths
=
[
0
,
0
,
0
,
0
,
0
,
1
,
1
],
lora_strengths
=
[
0
,
1
,
0
,
0
,
0
,
0
,
1
],
cache_threshold
=
0
,
expected_lpips
=
0.310
,
)
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment