Commit c00fb4b7 authored by muyangli

finalize the ci

parent 3ecf0d25
@@ -8,7 +8,7 @@ from .utils import run_test
 @pytest.mark.parametrize(
     "cache_threshold,height,width,num_inference_steps,lora_name,lora_strength,expected_lpips",
     [
-        (0.12, 1024, 1024, 30, None, 1, 0.26),
+        (0.12, 1024, 1024, 30, None, 1, 0.212),
     ],
 )
 def test_flux_dev_loras(
@@ -8,8 +8,8 @@ from .utils import run_test
 @pytest.mark.parametrize(
     "height,width,num_inference_steps,attention_impl,cpu_offload,expected_lpips",
     [
-        (1024, 1024, 50, "flashattn2", False, 0.226),
-        (2048, 512, 25, "nunchaku-fp16", False, 0.243),
+        (1024, 1024, 50, "flashattn2", False, 0.139),
+        (2048, 512, 25, "nunchaku-fp16", False, 0.148),
     ],
 )
 def test_flux_dev(
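The test bodies themselves are collapsed out of the diff. For orientation, here is a sketch of the pattern these files follow, assuming each parametrized test simply forwards its arguments to the shared `run_test` helper (whose signature appears in the utils hunk at the end of this commit); the decorator and thresholds are taken from the hunk above, while the forwarding body is an assumption:

```python
# Sketch, not the verbatim test file: the parametrize table matches the hunk
# above; the body that forwards arguments to run_test is assumed, since the
# diff collapses it.
import pytest

from .utils import run_test


@pytest.mark.parametrize(
    "height,width,num_inference_steps,attention_impl,cpu_offload,expected_lpips",
    [
        (1024, 1024, 50, "flashattn2", False, 0.139),
        (2048, 512, 25, "nunchaku-fp16", False, 0.148),
    ],
)
def test_flux_dev(height, width, num_inference_steps, attention_impl, cpu_offload, expected_lpips):
    # Each case renders at the given resolution/steps with the given attention
    # backend and checks the output against the pinned LPIPS threshold.
    run_test(
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        attention_impl=attention_impl,
        cpu_offload=cpu_offload,
        expected_lpips=expected_lpips,
    )
```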
@@ -8,10 +8,10 @@ from .utils import run_test
 @pytest.mark.parametrize(
     "num_inference_steps,lora_name,lora_strength,cpu_offload,expected_lpips",
     [
-        (25, "realism", 0.9, True, 0.178),
-        (25, "ghibsky", 1, False, 0.164),
+        (25, "realism", 0.9, True, 0.136),
+        (25, "ghibsky", 1, False, 0.186),
         # (28, "anime", 1, False, 0.284),
-        (24, "sketch", 1, True, 0.223),
+        (24, "sketch", 1, True, 0.260),
         # (28, "yarn", 1, False, 0.211),
         # (25, "haunted_linework", 1, True, 0.317),
     ],
@@ -51,7 +51,7 @@ def test_flux_dev_hypersd8_1536x2048():
         lora_names="hypersd8",
         lora_strengths=0.125,
         cache_threshold=0,
-        expected_lpips=0.291,
+        expected_lpips=0.164,
     )
@@ -71,7 +71,7 @@ def test_flux_dev_turbo8_1024x1920():
         lora_names="turbo8",
         lora_strengths=1,
         cache_threshold=0,
-        expected_lpips=0.189,
+        expected_lpips=0.120,
     )
@@ -91,7 +91,7 @@ def test_flux_dev_turbo8_yarn_2048x1024():
         lora_names=["turbo8", "yarn"],
         lora_strengths=[1, 1],
         cache_threshold=0,
-        expected_lpips=0.252,
+        expected_lpips=0.255,
     )
@@ -111,5 +111,5 @@ def test_flux_dev_turbo8_yarn_1024x1024():
         lora_names=["realism", "ghibsky", "anime", "sketch", "yarn", "haunted_linework", "turbo8"],
         lora_strengths=[0, 0, 0, 0, 0, 1, 1],
         cache_threshold=0,
-        expected_lpips=0.44,
+        expected_lpips=0.310,
     )
@@ -8,10 +8,10 @@ from .utils import run_test
 @pytest.mark.parametrize(
     "height,width,attention_impl,cpu_offload,expected_lpips",
     [
-        (1024, 1024, "flashattn2", False, 0.250),
-        (1024, 1024, "nunchaku-fp16", False, 0.255),
-        (1920, 1080, "nunchaku-fp16", False, 0.253),
-        (2048, 2048, "nunchaku-fp16", True, 0.274),
+        (1024, 1024, "flashattn2", False, 0.126),
+        (1024, 1024, "nunchaku-fp16", False, 0.126),
+        (1920, 1080, "nunchaku-fp16", False, 0.141),
+        (2048, 2048, "nunchaku-fp16", True, 0.166),
     ],
 )
 def test_int4_schnell(height: int, width: int, attention_impl: str, cpu_offload: bool, expected_lpips: float):
@@ -15,12 +15,12 @@ def test_flux_canny_dev():
         dtype=torch.bfloat16,
         height=1024,
         width=1024,
-        num_inference_steps=50,
+        num_inference_steps=30,
         guidance_scale=30,
         attention_impl="nunchaku-fp16",
         cpu_offload=False,
         cache_threshold=0,
-        expected_lpips=0.103 if get_precision() == "int4" else 0.164,
+        expected_lpips=0.076 if get_precision() == "int4" else 0.164,
     )
@@ -39,7 +39,7 @@ def test_flux_depth_dev():
         attention_impl="nunchaku-fp16",
         cpu_offload=False,
         cache_threshold=0,
-        expected_lpips=0.170 if get_precision() == "int4" else 0.120,
+        expected_lpips=0.137 if get_precision() == "int4" else 0.120,
     )
@@ -53,12 +53,12 @@ def test_flux_fill_dev():
         dtype=torch.bfloat16,
         height=1024,
         width=1024,
-        num_inference_steps=50,
+        num_inference_steps=30,
         guidance_scale=30,
         attention_impl="nunchaku-fp16",
         cpu_offload=False,
         cache_threshold=0,
-        expected_lpips=0.045,
+        expected_lpips=0.046,
     )
@@ -72,14 +72,14 @@ def test_flux_dev_canny_lora():
         dtype=torch.bfloat16,
         height=1024,
         width=1024,
-        num_inference_steps=50,
+        num_inference_steps=30,
         guidance_scale=30,
         attention_impl="nunchaku-fp16",
         cpu_offload=False,
         lora_names="canny",
         lora_strengths=0.85,
         cache_threshold=0,
-        expected_lpips=0.103,
+        expected_lpips=0.081,
     )
@@ -100,7 +100,7 @@ def test_flux_dev_depth_lora():
         cache_threshold=0,
         lora_names="depth",
         lora_strengths=0.85,
-        expected_lpips=0.163,
+        expected_lpips=0.181,
     )
@@ -121,7 +121,7 @@ def test_flux_fill_dev_turbo():
         cache_threshold=0,
         lora_names="turbo8",
         lora_strengths=1,
-        expected_lpips=0.048,
+        expected_lpips=0.036,
     )
@@ -135,10 +135,10 @@ def test_flux_dev_redux():
         dtype=torch.bfloat16,
         height=1024,
         width=1024,
-        num_inference_steps=50,
+        num_inference_steps=20,
         guidance_scale=2.5,
         attention_impl="nunchaku-fp16",
         cpu_offload=False,
         cache_threshold=0,
-        expected_lpips=(0.198 if get_precision() == "int4" else 0.198),
+        expected_lpips=(0.143 if get_precision() == "int4" else 0.198),
     )
@@ -6,7 +6,7 @@ from nunchaku.utils import get_precision, is_turing
 @pytest.mark.skipif(is_turing(), reason="Skip tests due to using Turing GPUs")
 @pytest.mark.parametrize(
-    "height,width,attention_impl,cpu_offload,expected_lpips", [(1024, 1024, "nunchaku-fp16", False, 0.25)]
+    "height,width,attention_impl,cpu_offload,expected_lpips", [(1024, 1024, "nunchaku-fp16", False, 0.186)]
 )
 def test_shuttle_jaguar(height: int, width: int, attention_impl: str, cpu_offload: bool, expected_lpips: float):
     run_test(
@@ -134,7 +134,7 @@
     cache_threshold: float = 0,
     lora_names: str | list[str] | None = None,
     lora_strengths: float | list[float] = 1.0,
-    max_dataset_size: int = 8,
+    max_dataset_size: int = 4,
     i2f_mode: str | None = None,
     expected_lpips: float = 0.5,
 ):
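The body of `run_test` is also collapsed; only its keyword defaults are visible above. As a rough illustration of the final LPIPS gate such a helper typically ends with, here is a minimal sketch using the `lpips` pip package — an assumption, as is the `check_lpips` name; the real helper may compute the metric differently:

```python
# Minimal sketch of an LPIPS regression gate, NOT the actual run_test body.
# Assumes the `lpips` pip package; inputs are float tensors in [-1, 1] with
# shape (N, 3, H, W).
import lpips
import torch


def check_lpips(generated: torch.Tensor, reference: torch.Tensor, expected_lpips: float) -> None:
    loss_fn = lpips.LPIPS(net="alex")  # AlexNet backbone, the package default
    with torch.no_grad():
        score = loss_fn(generated, reference).mean().item()
    # Fail if perceptual distance to the reference outputs exceeds the pinned
    # threshold from the parametrize tables above.
    assert score <= expected_lpips, f"LPIPS {score:.3f} > expected {expected_lpips:.3f}"
```

Under that reading, the lowered `expected_lpips` values in this commit tighten the CI gate, and the `max_dataset_size` default dropping from 8 to 4 presumably halves the number of prompts each test renders.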