# flux.1-canny-dev-lora.py
import torch
from controlnet_aux import CannyDetector
from diffusers import FluxControlPipeline
from diffusers.utils import load_image

from nunchaku import NunchakuFluxTransformer2dModel
from nunchaku.utils import get_precision
precision = get_precision()  # auto-detect your precision is 'int4' or 'fp4' based on your GPU
transformer = NunchakuFluxTransformer2dModel.from_pretrained(f"mit-han-lab/svdq-{precision}-flux.1-dev")
pipe = FluxControlPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")

### LoRA Related Code ###
transformer.update_lora_params(
    "black-forest-labs/FLUX.1-Canny-dev-lora/flux1-canny-dev-lora.safetensors"
)  # Path to your LoRA safetensors, can also be a remote HuggingFace path
transformer.set_lora_strength(0.85)  # Your LoRA strength here
### End of LoRA Related Code ###

prompt = (
    "A robot made of exotic candies and chocolates of different kinds. "
    "The background is filled with confetti and celebratory gifts."
)
control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")

processor = CannyDetector()
control_image = processor(
    control_image, low_threshold=50, high_threshold=200, detect_resolution=1024, image_resolution=1024
)

image = pipe(
    prompt=prompt, control_image=control_image, height=1024, width=1024, num_inference_steps=50, guidance_scale=30.0
).images[0]
image.save(f"flux.1-canny-dev-lora-{precision}.png")