# flux.1-dev-double_cache_offloading.py
import torch
from diffusers import FluxPipeline

from nunchaku import NunchakuFluxTransformer2dModel
from nunchaku.caching.diffusers_adapters import apply_cache_on_pipe
from nunchaku.utils import get_precision

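# get_precision() returns the quantization format ("int4" or "fp4") that matches the current GPU.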
precision = get_precision()

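# Load the 4-bit SVDQuant FLUX.1-dev transformer. offload=True enables CPU offloading of the
# transformer weights, trading a little speed for a much smaller GPU memory footprint.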
transformer = NunchakuFluxTransformer2dModel.from_pretrained(
    f"mit-han-lab/nunchaku-flux.1-dev/svdq-{precision}_r32-flux.1-dev.safetensors",
    offload=True,
)

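# Build the standard diffusers pipeline around the quantized transformer and move it to the GPU.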
pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")

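# Apply double first-block caching: cached transformer outputs are reused whenever the residual
# change between steps stays below the thresholds (multi = double-stream blocks, single =
# single-stream blocks). Larger thresholds cache more aggressively, running faster at some cost
# in fidelity.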
apply_cache_on_pipe(
    pipeline,
    use_double_fb_cache=True,
    residual_diff_threshold_multi=0.09,
    residual_diff_threshold_single=0.12,
)

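# Run 50 denoising steps; the cache skips redundant transformer computation between steps.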
image = pipeline(["A cat holding a sign that says hello world"], num_inference_steps=50).images[0]

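# Save the output, tagging the filename with the detected precision.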
image.save(f"flux.1-dev-cache-{precision}.png")