Commit 667d0e91 authored by Muyang Li, committed by github-actions[bot]

[Auto Sync] test: add the expected LPIPS for FP4 models on Blackwell GPUs

parent 5cfb5351
@@ -8,7 +8,7 @@ from .utils import run_test
 @pytest.mark.parametrize(
     "num_inference_steps,lora_name,lora_strength,cpu_offload,expected_lpips",
     [
-        (25, "realism", 0.9, True, 0.136 if get_precision() == "int4" else 0.1),
+        (25, "realism", 0.9, True, 0.136 if get_precision() == "int4" else 0.112),
         # (25, "ghibsky", 1, False, 0.186),
         # (28, "anime", 1, False, 0.284),
         (24, "sketch", 1, True, 0.291 if get_precision() == "int4" else 0.182),
@@ -51,5 +51,5 @@ def test_flux_dev_turbo8_ghibsky_1024x1024():
         lora_names=["realism", "ghibsky", "anime", "sketch", "yarn", "haunted_linework", "turbo8"],
         lora_strengths=[0, 1, 0, 0, 0, 0, 1],
         cache_threshold=0,
-        expected_lpips=0.310 if get_precision() == "int4" else 0.150,
+        expected_lpips=0.310 if get_precision() == "int4" else 0.168,
     )
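For context, the expected_lpips values above are perceptual-similarity thresholds that the test harness compares against. The sketch below (not the repository's actual run_test helper; the image paths, tolerance, and check_lpips name are illustrative assumptions) shows how such a threshold could be asserted with the lpips PyTorch package:

import lpips
import numpy as np
import torch
from PIL import Image


def to_tensor(path: str) -> torch.Tensor:
    """Load an image and scale it to the [-1, 1] range expected by LPIPS."""
    img = np.asarray(Image.open(path).convert("RGB"), dtype=np.float32) / 127.5 - 1.0
    return torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0)


def check_lpips(generated_path: str, reference_path: str, expected_lpips: float) -> None:
    # Hypothetical helper: compares a generated image against a 16-bit reference
    # image and asserts the perceptual distance stays within the expected bound.
    loss_fn = lpips.LPIPS(net="alex")
    with torch.no_grad():
        score = loss_fn(to_tensor(generated_path), to_tensor(reference_path)).item()
    # Small slack for run-to-run nondeterminism (value chosen for illustration).
    assert score <= expected_lpips + 0.01, f"LPIPS {score:.3f} exceeds {expected_lpips:.3f}"

Under this reading, the commit simply records tighter reference thresholds (0.112 and 0.168) for the non-int4 (FP4 on Blackwell) precision path.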