Commit 2ea0c3d6 authored by muyangli's avatar muyangli
Browse files

fix the number of steps of the memory test

parent efca5106
...@@ -38,7 +38,7 @@ def test_flux_schnell_memory(use_qencoder: bool, cpu_offload: bool, memory_limit ...@@ -38,7 +38,7 @@ def test_flux_schnell_memory(use_qencoder: bool, cpu_offload: bool, memory_limit
pipeline = pipeline.to("cuda") pipeline = pipeline.to("cuda")
pipeline( pipeline(
"A cat holding a sign that says hello world", width=1024, height=1024, num_inference_steps=50, guidance_scale=0 "A cat holding a sign that says hello world", width=1024, height=1024, num_inference_steps=4, guidance_scale=0
) )
memory = torch.cuda.max_memory_reserved(0) / 1024**3 memory = torch.cuda.max_memory_reserved(0) / 1024**3
assert memory < memory_limit assert memory < memory_limit
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment