Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
fengzch-das
nunchaku
Commits
46b48063
Commit
46b48063
authored
Apr 12, 2025
by
muyangli
Browse files
better ci
parent
0a5b600c
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
36 additions
and
7 deletions
+36
-7
.github/workflows/pr_test_linux.yaml
.github/workflows/pr_test_linux.yaml
+7
-5
tests/flux/test_flux_schnell.py
tests/flux/test_flux_schnell.py
+1
-1
tests/flux/utils.py
tests/flux/utils.py
+28
-1
No files found.
.github/workflows/pr_test_linux.yaml
View file @
46b48063
...
...
@@ -8,12 +8,14 @@ on:
-
"
src/**"
-
"
tests/**"
pull_request
:
branches
:
[
main
]
types
:
[
opened
,
synchronize
,
reopened
,
edited
]
paths
:
-
"
nunchaku/**"
-
"
src/**"
-
"
tests/**"
workflow_dispatch
:
issue_comment
:
types
:
[
created
]
concurrency
:
...
...
@@ -46,7 +48,7 @@ jobs:
-
name
:
Checkout
uses
:
actions/checkout@v4
with
:
ref
:
${{ github.event.pull_request.head.sha || github.sha }}
#
ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules
:
true
-
name
:
Show current commit
...
...
@@ -98,7 +100,7 @@ jobs:
source $(conda info --base)/etc/profile.d/conda.sh
conda activate test_env || { echo "Failed to activate conda env"; exit 1; }
which python
HF_TOKEN=${{ secrets.HF_TOKEN }} pytest -
s
tests/flux/test_flux_memory.py
HF_TOKEN=${{ secrets.HF_TOKEN }} pytest -
v
tests/flux/test_flux_memory.py
test-flux-other
:
needs
:
build
...
...
@@ -113,7 +115,7 @@ jobs:
source $(conda info --base)/etc/profile.d/conda.sh
conda activate test_env || { echo "Failed to activate conda env"; exit 1; }
which python
HF_TOKEN=${{ secrets.HF_TOKEN }} pytest -
s
tests/flux --ignore=tests/flux/test_flux_memory.py
HF_TOKEN=${{ secrets.HF_TOKEN }} pytest -
v
tests/flux --ignore=tests/flux/test_flux_memory.py
test-sana
:
needs
:
build
...
...
@@ -128,7 +130,7 @@ jobs:
source $(conda info --base)/etc/profile.d/conda.sh
conda activate test_env || { echo "Failed to activate conda env"; exit 1; }
which python
HF_TOKEN=${{ secrets.HF_TOKEN }} pytest -
s
tests/sana
HF_TOKEN=${{ secrets.HF_TOKEN }} pytest -
v
tests/sana
clean-up
:
if
:
always() && (github.event_name != 'issue_comment' || needs.check-comment.outputs.should_run == 'true')
...
...
tests/flux/test_flux_schnell.py
View file @
46b48063
...
...
@@ -10,7 +10,7 @@ from .utils import run_test
[
(
1024
,
1024
,
"flashattn2"
,
False
,
0.126
),
(
1024
,
1024
,
"nunchaku-fp16"
,
False
,
0.126
),
(
1920
,
1080
,
"nunchaku-fp16"
,
False
,
0.1
41
),
(
1920
,
1080
,
"nunchaku-fp16"
,
False
,
0.1
58
),
(
2048
,
2048
,
"nunchaku-fp16"
,
True
,
0.166
),
],
)
...
...
tests/flux/utils.py
View file @
46b48063
import
gc
import
os
import
torch
...
...
@@ -12,6 +13,7 @@ from nunchaku import NunchakuFluxTransformer2dModel, NunchakuT5EncoderModel
from
nunchaku.lora.flux.compose
import
compose_lora
from
..data
import
get_dataset
from
..utils
import
already_generate
,
compute_lpips
,
hash_str_to_int
from
diffusers.hooks
import
apply_group_offloading
ORIGINAL_REPO_MAP
=
{
"flux.1-schnell"
:
"black-forest-labs/FLUX.1-schnell"
,
...
...
@@ -61,7 +63,13 @@ def run_pipeline(dataset, batch_size: int, task: str, pipeline: FluxPipeline, sa
assert
task
in
[
"t2i"
,
"fill"
]
processor
=
None
for
row
in
tqdm
(
dataset
.
iter
(
batch_size
=
batch_size
,
drop_last_batch
=
False
)):
for
row
in
tqdm
(
dataset
.
iter
(
batch_size
=
batch_size
,
drop_last_batch
=
False
),
desc
=
"Batch"
,
total
=
len
(
dataset
),
position
=
0
,
leave
=
False
,
):
filenames
=
row
[
"filename"
]
prompts
=
row
[
"prompt"
]
...
...
@@ -138,6 +146,8 @@ def run_test(
i2f_mode
:
str
|
None
=
None
,
expected_lpips
:
float
=
0.5
,
):
gc
.
collect
()
torch
.
cuda
.
empty_cache
()
if
isinstance
(
dtype
,
str
):
dtype_str
=
dtype
if
dtype
==
"bf16"
:
...
...
@@ -190,6 +200,21 @@ def run_test(
if
gpu_memory
>
36
*
1024
:
pipeline
=
pipeline
.
to
(
"cuda"
)
elif
gpu_memory
<
26
*
1024
:
pipeline
.
transformer
.
enable_group_offload
(
onload_device
=
torch
.
device
(
"cuda"
),
offload_device
=
torch
.
device
(
"cpu"
),
offload_type
=
"leaf_level"
,
use_stream
=
True
,
)
pipeline
.
text_encoder
.
to
(
"cuda"
)
apply_group_offloading
(
pipeline
.
text_encoder_2
,
onload_device
=
torch
.
device
(
"cuda"
),
offload_type
=
"block_level"
,
num_blocks_per_group
=
2
,
)
pipeline
.
vae
.
to
(
"cuda"
)
else
:
pipeline
.
enable_model_cpu_offload
()
...
...
@@ -216,6 +241,7 @@ def run_test(
)
del
pipeline
# release the gpu memory
gc
.
collect
()
torch
.
cuda
.
empty_cache
()
precision_str
=
precision
...
...
@@ -290,6 +316,7 @@ def run_test(
del
transformer
del
pipeline
# release the gpu memory
gc
.
collect
()
torch
.
cuda
.
empty_cache
()
lpips
=
compute_lpips
(
save_dir_16bit
,
save_dir_4bit
)
print
(
f
"lpips:
{
lpips
}
"
)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment