fengzch-das / nunchaku · Commits · e2116d7b

Unverified commit e2116d7b, authored Jun 01, 2025 by Muyang Li; committed by GitHub on Jun 01, 2025.

chore: release v0.3.0
Parents: 6098c419, d94c2078
Changes: 64
Showing 4 changed files with 15 additions and 32 deletions (+15 -32):

tests/flux/test_flux_teacache.py   +4   -2
tests/flux/test_flux_tools.py      +0   -21
tests/flux/test_lora_reset.py      +2   -2
tests/flux/utils.py                +9   -7
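The recurring change across these tests is the checkpoint path format: the Nunchaku weights are now loaded from a single .safetensors file inside a consolidated Hugging Face repository (for example mit-han-lab/nunchaku-flux.1-dev/svdq-{precision}_r32-flux.1-dev.safetensors) rather than from the older per-precision repos such as mit-han-lab/svdq-{precision}-flux.1-dev. A minimal sketch of the updated loading pattern follows; the from_pretrained calls mirror the diff below, while the import paths are assumptions based on the project's usual examples.

```python
import torch
from diffusers import FluxPipeline

# Assumed import paths (not shown in this diff); adjust if the package layout differs.
from nunchaku import NunchakuFluxTransformer2dModel
from nunchaku.utils import get_precision

precision = get_precision()  # "int4" or "fp4", auto-detected from the GPU

# New-style path: a .safetensors file inside the consolidated repo, as in this commit.
transformer = NunchakuFluxTransformer2dModel.from_pretrained(
    f"mit-han-lab/nunchaku-flux.1-dev/svdq-{precision}_r32-flux.1-dev.safetensors"
)

pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")
```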
tests/flux/test_flux_teacache.py

@@ -44,7 +44,7 @@ from .utils import already_generate, compute_lpips, offload_pipeline
             "muppets",
             42,
             0.3,
-            0.360 if get_precision() == "int4" else 0.495,
+            0.507 if get_precision() == "int4" else 0.495,
         ),
         (
             1024,
@@ -112,7 +112,9 @@ def test_flux_teacache(
     # Then, generate results with the 4-bit model
     if not already_generate(results_dir_4_bit, 1):
-        transformer = NunchakuFluxTransformer2dModel.from_pretrained(f"mit-han-lab/svdq-{precision}-flux.1-dev")
+        transformer = NunchakuFluxTransformer2dModel.from_pretrained(
+            f"mit-han-lab/nunchaku-flux.1-dev/svdq-{precision}_r32-flux.1-dev.safetensors"
+        )
         pipeline = FluxPipeline.from_pretrained(
             "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
         ).to("cuda")
tests/flux/test_flux_tools.py

@@ -63,27 +63,6 @@ def test_flux_fill_dev():
     )


-# @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
-# def test_flux_dev_canny_lora():
-#     run_test(
-#         precision=get_precision(),
-#         model_name="flux.1-dev",
-#         dataset_name="MJHQ-control",
-#         task="canny",
-#         dtype=torch.bfloat16,
-#         height=1024,
-#         width=1024,
-#         num_inference_steps=30,
-#         guidance_scale=30,
-#         attention_impl="nunchaku-fp16",
-#         cpu_offload=False,
-#         lora_names="canny",
-#         lora_strengths=0.85,
-#         cache_threshold=0,
-#         expected_lpips=0.081,
-#     )
-
-
 @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
 def test_flux_dev_depth_lora():
     run_test(
tests/flux/test_lora_reset.py

@@ -12,7 +12,7 @@ from ..utils import compute_lpips
 def test_lora_reset():
     precision = get_precision()  # auto-detect your precision is 'int4' or 'fp4' based on your GPU
     transformer = NunchakuFluxTransformer2dModel.from_pretrained(
-        f"mit-han-lab/svdq-{precision}-flux.1-dev", offload=True
+        f"mit-han-lab/nunchaku-flux.1-dev/svdq-{precision}_r32-flux.1-dev.safetensors", offload=True
     )
     pipeline = FluxPipeline.from_pretrained(
         "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
@@ -44,4 +44,4 @@ def test_lora_reset():
     lpips = compute_lpips(os.path.join(save_dir, "before.png"), os.path.join(save_dir, "after.png"))
     print(f"LPIPS: {lpips}")
-    assert lpips < 0.179 * 1.1
+    assert lpips < 0.232 * 1.1
tests/flux/utils.py

@@ -28,12 +28,12 @@ ORIGINAL_REPO_MAP = {
 }

 NUNCHAKU_REPO_PATTERN_MAP = {
-    "flux.1-schnell": "mit-han-lab/svdq-{precision}-flux.1-schnell",
-    "flux.1-dev": "mit-han-lab/svdq-{precision}-flux.1-dev",
-    "shuttle-jaguar": "mit-han-lab/svdq-{precision}-shuttle-jaguar",
-    "flux.1-canny-dev": "mit-han-lab/svdq-{precision}-flux.1-canny-dev",
-    "flux.1-depth-dev": "mit-han-lab/svdq-{precision}-flux.1-depth-dev",
-    "flux.1-fill-dev": "mit-han-lab/svdq-{precision}-flux.1-fill-dev",
+    "flux.1-schnell": "mit-han-lab/nunchaku-flux.1-schnell/svdq-{precision}_r32-flux.1-schnell.safetensors",
+    "flux.1-dev": "mit-han-lab/nunchaku-flux.1-dev/svdq-{precision}_r32-flux.1-dev.safetensors",
+    "shuttle-jaguar": "mit-han-lab/nunchaku-shuttle-jaguar/svdq-{precision}_r32-shuttle-jaguar.safetensors",
+    "flux.1-canny-dev": "mit-han-lab/nunchaku-flux.1-canny-dev/svdq-{precision}_r32-flux.1-canny-dev.safetensors",
+    "flux.1-depth-dev": "mit-han-lab/nunchaku-flux.1-depth-dev/svdq-{precision}_r32-flux.1-depth-dev.safetensors",
+    "flux.1-fill-dev": "mit-han-lab/nunchaku-flux.1-fill-dev/svdq-{precision}_r32-flux.1-fill-dev.safetensors",
 }

 LORA_PATH_MAP = {
@@ -285,7 +285,9 @@ def run_test(
     if task == "redux":
         pipeline_init_kwargs.update({"text_encoder": None, "text_encoder_2": None})
     elif use_qencoder:
-        text_encoder_2 = NunchakuT5EncoderModel.from_pretrained("mit-han-lab/svdq-flux.1-t5")
+        text_encoder_2 = NunchakuT5EncoderModel.from_pretrained(
+            "mit-han-lab/nunchaku-t5/awq-int4-flux.1-t5xxl.safetensors"
+        )
         pipeline_init_kwargs["text_encoder_2"] = text_encoder_2
     pipeline = pipeline_cls.from_pretrained(model_id_16bit, torch_dtype=dtype, **pipeline_init_kwargs)
     if cpu_offload:
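The new NUNCHAKU_REPO_PATTERN_MAP values keep a {precision} placeholder, so callers presumably substitute the detected precision before loading. A hypothetical illustration follows; the .format() substitution and the import path are assumptions here, since only the pattern strings and get_precision() appear in this diff.

```python
# Hypothetical illustration of resolving a NUNCHAKU_REPO_PATTERN_MAP entry.
# The .format(...) call is an assumption; only the pattern strings and
# get_precision() are shown in this commit.
from nunchaku.utils import get_precision  # assumed import path

NUNCHAKU_REPO_PATTERN_MAP = {
    "flux.1-dev": "mit-han-lab/nunchaku-flux.1-dev/svdq-{precision}_r32-flux.1-dev.safetensors",
}

path = NUNCHAKU_REPO_PATTERN_MAP["flux.1-dev"].format(precision=get_precision())
# e.g. "mit-han-lab/nunchaku-flux.1-dev/svdq-int4_r32-flux.1-dev.safetensors"
# when get_precision() returns "int4"
```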