Project: fengzch-das/nunchaku
Commit: 698bc83a (parent 84df0933)
Author: muyangli, Apr 17, 2025

update the test_flux_tools
Showing 1 changed file with 103 additions and 104 deletions
tests/flux/test_flux_tools.py  (+103, -104)
@@ -5,11 +5,68 @@ from nunchaku.utils import get_precision, is_turing
 from .utils import run_test


+@pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
+def test_flux_canny_dev():
+    run_test(
+        precision=get_precision(),
+        model_name="flux.1-canny-dev",
+        dataset_name="MJHQ-control",
+        task="canny",
+        dtype=torch.bfloat16,
+        height=1024,
+        width=1024,
+        num_inference_steps=30,
+        guidance_scale=30,
+        attention_impl="nunchaku-fp16",
+        cpu_offload=False,
+        cache_threshold=0,
+        expected_lpips=0.076 if get_precision() == "int4" else 0.164,
+    )
+
+
+@pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
+def test_flux_depth_dev():
+    run_test(
+        precision=get_precision(),
+        model_name="flux.1-depth-dev",
+        dataset_name="MJHQ-control",
+        task="depth",
+        dtype=torch.bfloat16,
+        height=1024,
+        width=1024,
+        num_inference_steps=30,
+        guidance_scale=10,
+        attention_impl="nunchaku-fp16",
+        cpu_offload=False,
+        cache_threshold=0,
+        expected_lpips=0.137 if get_precision() == "int4" else 0.120,
+    )
+
+
+@pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
+def test_flux_fill_dev():
+    run_test(
+        precision=get_precision(),
+        model_name="flux.1-fill-dev",
+        dataset_name="MJHQ-control",
+        task="fill",
+        dtype=torch.bfloat16,
+        height=1024,
+        width=1024,
+        num_inference_steps=30,
+        guidance_scale=30,
+        attention_impl="nunchaku-fp16",
+        cpu_offload=False,
+        cache_threshold=0,
+        expected_lpips=0.046,
+    )
+
+
 # @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
-# def test_flux_canny_dev():
+# def test_flux_dev_canny_lora():
 #     run_test(
 #         precision=get_precision(),
-#         model_name="flux.1-canny-dev",
+#         model_name="flux.1-dev",
 #         dataset_name="MJHQ-control",
 #         task="canny",
 #         dtype=torch.bfloat16,
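The expected_lpips values above are thresholds on a perceptual image-similarity metric; run_test itself comes from the test package's utils module and is not part of this diff. As a rough illustration only (the lpips package, the check_lpips helper, and the 10% tolerance below are assumptions for this sketch, not nunchaku's actual code), such a gate can look like this:

# A minimal sketch, not the repository's run_test: how an expected_lpips gate
# like the one used above is commonly implemented. The lpips package, the
# check_lpips name, and the 10% tolerance are illustrative assumptions.
import lpips
import torch


def check_lpips(generated: torch.Tensor, reference: torch.Tensor, expected_lpips: float) -> None:
    """Assert that the perceptual distance between two image batches stays under a threshold.

    Both tensors are NCHW and scaled to [-1, 1], as the LPIPS metric expects.
    """
    metric = lpips.LPIPS(net="alex")  # AlexNet backbone, the library's usual evaluation default
    with torch.no_grad():
        distance = metric(generated, reference).mean().item()
    # Allow a small margin so a flaky-but-bounded image metric does not fail the gate.
    assert distance <= expected_lpips * 1.1, f"LPIPS {distance:.3f} exceeds budget {expected_lpips:.3f}"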
@@ -19,112 +76,55 @@ from .utils import run_test
 #         guidance_scale=30,
 #         attention_impl="nunchaku-fp16",
 #         cpu_offload=False,
-#         cache_threshold=0,
-#         expected_lpips=0.076 if get_precision() == "int4" else 0.164,
+#         lora_names="canny",
+#         lora_strengths=0.85,
+#         cache_threshold=0,
+#         expected_lpips=0.081,
 #     )
-#
-#
-# @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
-# def test_flux_depth_dev():
-#     run_test(
-#         precision=get_precision(),
-#         model_name="flux.1-depth-dev",
-#         dataset_name="MJHQ-control",
-#         task="depth",
-#         dtype=torch.bfloat16,
-#         height=1024,
-#         width=1024,
-#         num_inference_steps=30,
-#         guidance_scale=10,
-#         attention_impl="nunchaku-fp16",
-#         cpu_offload=False,
-#         cache_threshold=0,
-#         expected_lpips=0.137 if get_precision() == "int4" else 0.120,
-#     )
-#
-#
-# @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
-# def test_flux_fill_dev():
-#     run_test(
-#         precision=get_precision(),
-#         model_name="flux.1-fill-dev",
-#         dataset_name="MJHQ-control",
-#         task="fill",
-#         dtype=torch.bfloat16,
-#         height=1024,
-#         width=1024,
-#         num_inference_steps=30,
-#         guidance_scale=30,
-#         attention_impl="nunchaku-fp16",
-#         cpu_offload=False,
-#         cache_threshold=0,
-#         expected_lpips=0.046,
-#     )
-#
-#
-# # @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
-# # def test_flux_dev_canny_lora():
-# #     run_test(
-# #         precision=get_precision(),
-# #         model_name="flux.1-dev",
-# #         dataset_name="MJHQ-control",
-# #         task="canny",
-# #         dtype=torch.bfloat16,
-# #         height=1024,
-# #         width=1024,
-# #         num_inference_steps=30,
-# #         guidance_scale=30,
-# #         attention_impl="nunchaku-fp16",
-# #         cpu_offload=False,
-# #         lora_names="canny",
-# #         lora_strengths=0.85,
-# #         cache_threshold=0,
-# #         expected_lpips=0.081,
-# #     )
-#
-#
-# @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
-# def test_flux_dev_depth_lora():
-#     run_test(
-#         precision=get_precision(),
-#         model_name="flux.1-dev",
-#         dataset_name="MJHQ-control",
-#         task="depth",
-#         dtype=torch.bfloat16,
-#         height=1024,
-#         width=1024,
-#         num_inference_steps=30,
-#         guidance_scale=10,
-#         attention_impl="nunchaku-fp16",
-#         cpu_offload=False,
-#         cache_threshold=0,
-#         lora_names="depth",
-#         lora_strengths=0.85,
-#         expected_lpips=0.181,
-#     )
-#
-#
-# @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
-# def test_flux_fill_dev_turbo():
-#     run_test(
-#         precision=get_precision(),
-#         model_name="flux.1-fill-dev",
-#         dataset_name="MJHQ-control",
-#         task="fill",
-#         dtype=torch.bfloat16,
-#         height=1024,
-#         width=1024,
-#         num_inference_steps=8,
-#         guidance_scale=30,
-#         attention_impl="nunchaku-fp16",
-#         cpu_offload=False,
-#         cache_threshold=0,
-#         lora_names="turbo8",
-#         lora_strengths=1,
-#         expected_lpips=0.036,
-#     )
+
+
+@pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
+def test_flux_dev_depth_lora():
+    run_test(
+        precision=get_precision(),
+        model_name="flux.1-dev",
+        dataset_name="MJHQ-control",
+        task="depth",
+        dtype=torch.bfloat16,
+        height=1024,
+        width=1024,
+        num_inference_steps=30,
+        guidance_scale=10,
+        attention_impl="nunchaku-fp16",
+        cpu_offload=False,
+        cache_threshold=0,
+        lora_names="depth",
+        lora_strengths=0.85,
+        expected_lpips=0.181,
+    )
+
+
+@pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
+def test_flux_fill_dev_turbo():
+    run_test(
+        precision=get_precision(),
+        model_name="flux.1-fill-dev",
+        dataset_name="MJHQ-control",
+        task="fill",
+        dtype=torch.bfloat16,
+        height=1024,
+        width=1024,
+        num_inference_steps=8,
+        guidance_scale=30,
+        attention_impl="nunchaku-fp16",
+        cpu_offload=False,
+        cache_threshold=0,
+        lora_names="turbo8",
+        lora_strengths=1,
+        expected_lpips=0.036,
+    )


 @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
 def test_flux_dev_redux():
     run_test(
@@ -141,5 +141,4 @@ def test_flux_dev_redux():
         cpu_offload=False,
         cache_threshold=0,
+        expected_lpips=(0.162 if get_precision() == "int4" else 0.5),  # not sure why the fp4 model is so different
         max_dataset_size=16,
     )
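With the canny, depth, fill, depth-LoRA, and turbo-fill tests now active, they can be selected directly with pytest. A possible invocation sketch follows; the -k expression, the path, and the assumption that the suite runs from the repository root with pytest installed are illustrative, not part of this commit.

# Illustrative runner: select only the FLUX tool tests touched by this commit.
# Assumes pytest and the nunchaku test dependencies are installed locally.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(
        pytest.main(
            [
                "tests/flux/test_flux_tools.py",
                "-k", "canny_dev or depth_dev or fill_dev or depth_lora or fill_dev_turbo",
                "-v",
            ]
        )
    )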