Commit 06d90947 authored by muyangli

chore: clean debugging scripts

parent f25fa6ff
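# Baseline script: run FLUX.1-Fill-dev in BF16 through diffusers with the
# removal LoRA fused into the pipeline, for comparison against the quantized run below.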
import torch
from diffusers import FluxFillPipeline
from diffusers.utils import load_image
from nunchaku import NunchakuFluxTransformer2dModel
from nunchaku.utils import get_precision
image = load_image("./removal_image.png")
mask = load_image("./removal_mask.png")
precision = get_precision()  # auto-detects whether your GPU uses 'int4' or 'fp4' precision
pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights(
    "./loras/removalV2.safetensors"
)  # Path to your LoRA safetensors; can also be a remote HuggingFace path
pipe.fuse_lora(lora_scale=1)
pipe.enable_model_cpu_offload()
image = pipe(
    prompt="",
    image=image,
    mask_image=mask,
    height=720,
    width=1280,
    guidance_scale=30,
    num_inference_steps=20,
    max_sequence_length=512,
    generator=torch.Generator().manual_seed(42),
).images[0]
image.save("flux.1-fill-dev-bf16.png")
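# Quantized script: run the same fill task with the Nunchaku SVDQuant (int4/fp4)
# FLUX.1-Fill-dev transformer, applying the removal LoRA through Nunchaku's API.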
import torch
from diffusers import FluxFillPipeline
from diffusers.utils import load_image
from nunchaku import NunchakuFluxTransformer2dModel
from nunchaku.utils import get_precision
image = load_image("./removal_image.png")
mask = load_image("./removal_mask.png")
precision = get_precision()  # auto-detects whether your GPU uses 'int4' or 'fp4' precision
transformer = NunchakuFluxTransformer2dModel.from_pretrained(f"mit-han-lab/svdq-{precision}-flux.1-fill-dev")
### LoRA Related Code ###
transformer.update_lora_params(
    "loras/removalV2.safetensors"
)  # Path to your LoRA safetensors; can also be a remote HuggingFace path
transformer.set_lora_strength(1) # Your LoRA strength here
### End of LoRA Related Code ###
pipe = FluxFillPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")
image = pipe(
    prompt="",
    image=image,
    mask_image=mask,
    height=720,
    width=1280,
    guidance_scale=30,
    num_inference_steps=20,
    max_sequence_length=512,
    generator=torch.Generator().manual_seed(42),
).images[0]
image.save(f"flux.1-fill-dev-{precision}.png")
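# Mask preparation: split an RGBA "removal" image into the RGB input image and
# the inverted-alpha mask consumed by the fill pipelines above.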
from PIL import Image, ImageChops
# Open the RGBA image
img = Image.open("removal.png").convert("RGBA")
# Split into the R, G, B, A channels
r, g, b, a = img.split()
# Invert the alpha channel so it can serve as the removal mask
a_inverted = ImageChops.invert(a)
# Merge R, G, B back into an RGB image
rgb_img = Image.merge("RGB", (r, g, b))
# Save the RGB image and the inverted alpha mask as separate files
rgb_img.save("removal_image.png")
a_inverted.save("removal_mask.png")
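# LoRA filtering: keep only the LoRA weights that touch the single transformer
# blocks, zero out the rest, and save the result as a new safetensors file.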
import torch
from safetensors.torch import save_file
from nunchaku.utils import load_state_dict_in_safetensors
if __name__ == "__main__":
    sd = load_state_dict_in_safetensors("loras/removalV2.safetensors")
    new_sd = {}
    for k, v in sd.items():
        if ".single_transformer_blocks." in k:
            # Keep LoRA weights belonging to the single transformer blocks as-is
            new_sd[k] = v
        else:
            # Zero out every other LoRA weight while preserving tensor shapes
            new_sd[k] = torch.zeros_like(v)
    save_file(new_sd, "loras/removalV2-single.safetensors")
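    # A minimal sketch, not part of the original script: the filtered LoRA can be
    # loaded back with the same Nunchaku API used in the quantized script above,
    # as a quick check that the saved file is still a usable LoRA checkpoint.
    from nunchaku import NunchakuFluxTransformer2dModel
    from nunchaku.utils import get_precision

    precision = get_precision()
    transformer = NunchakuFluxTransformer2dModel.from_pretrained(f"mit-han-lab/svdq-{precision}-flux.1-fill-dev")
    transformer.update_lora_params("loras/removalV2-single.safetensors")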