Commit cdf5a19b authored by Hyunsung Lee, committed by Zhekai Zhang

Add formatting rule and format fix

parent 60a0d6d7
 name: Lint
-on: [push, pull_request]
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
 jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
-          python-version: '3.x'
+          python-version: '3.10'
       - name: Install dependencies
-        run: pip install ruff yapf
+        run: pip install ruff
       - name: Run ruff check
         run: ruff check nunchaku comfyui examples tests --output-format github
-      - name: Run yapf check
-        run: yapf --diff nunchaku comfyui examples tests --recursive
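The CI job amounts to installing ruff and running a single check over the package, node, example, and test directories. A minimal sketch of reproducing that step locally, assuming ruff has been installed in the current environment as in the workflow above:

import subprocess
import sys

# Run the same ruff invocation the Lint workflow uses and exit with its status code.
result = subprocess.run(
    ["ruff", "check", "nunchaku", "comfyui", "examples", "tests", "--output-format", "github"],
    check=False,
)
sys.exit(result.returncode)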
 repos:
-  - repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: v0.11.2
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.3.2
     hooks:
       - id: ruff
-        args: ["check", "nunchaku", "comfyui", "examples", "tests", "--output-format", "github"]
+        args: ["check", "--output-format", "github"]
+        pass_filenames: true
-  - repo: https://github.com/google/yapf
-    rev: v0.43.0
-    hooks:
-      - id: yapf
-        args: ["--diff", "--recursive", "nunchaku", "comfyui", "examples", "tests" ]
@@ -42,7 +42,7 @@ class SVDQuantFluxLoraLoader:
                 "base_model_name": (
                     base_model_paths,
                     {
-                        "tooltip": "If the lora format is SVDQuant, this field has no use. Otherwise, the base model's state dictionary is required for converting the LoRA weights to SVDQuant."
+                        "tooltip": "If the lora format is SVDQuant, this field has no use. Otherwise, the base model's state dictionary is required for converting the LoRA weights to SVDQuant."  # noqa: E501
                     },
                 ),
                 "lora_strength": (
@@ -58,7 +58,7 @@ class SVDQuantFluxLoraLoader:
                 "save_converted_lora": (
                     ["disable", "enable"],
                     {
-                        "tooltip": "If enabled, the converted LoRA will be saved as a .safetensors file in the save directory of your LoRA file."
+                        "tooltip": "If enabled, the converted LoRA will be saved as a .safetensors file in the save directory of your LoRA file."  # noqa: E501
                     },
                 ),
             }
@@ -89,7 +89,7 @@ class SVDQuantFluxDiTLoader:
                     ["auto", "enable", "disable"],
                     {
                         "default": "auto",
-                        "tooltip": "Whether to enable CPU offload for the transformer model. 'auto' will enable it if the GPU memory is less than 14G.",
+                        "tooltip": "Whether to enable CPU offload for the transformer model. 'auto' will enable it if the GPU memory is less than 14G.",  # noqa: E501
                     },
                 ),
                 "device_id": (
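The tooltip describes "auto" as enabling CPU offload when GPU memory is below 14G. A minimal sketch of that decision using torch.cuda; the helper name and threshold handling are illustrative, not the node's actual code:

import torch

def should_offload(device_id: int = 0, threshold_gib: float = 14.0) -> bool:
    # Mirror the "auto" behaviour from the tooltip: offload when the GPU is small or absent.
    if not torch.cuda.is_available():
        return True
    total_gib = torch.cuda.get_device_properties(device_id).total_memory / (1024 ** 3)
    return total_gib < threshold_gib

# cpu_offload == "auto" would then resolve to should_offload(device_id).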
@@ -23,7 +23,7 @@ processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf
 control_image = processor(control_image)[0].convert("RGB")
 image = pipe(
-    prompt="A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts.",
+    prompt="A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts.",  # noqa: E501
     control_image=control_image,
     height=1024,
     width=1024,
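For context, this hunk belongs to a FLUX.1 Depth control example. A self-contained sketch of the surrounding script; the nunchaku import path, the quantized model id, and the image_gen_aux import are assumptions rather than taken from this diff:

import torch
from diffusers import FluxControlPipeline
from diffusers.utils import load_image
from image_gen_aux import DepthPreprocessor  # assumed source of DepthPreprocessor

from nunchaku import NunchakuFluxTransformer2dModel  # assumed import path

# The model id is an assumption for illustration.
transformer = NunchakuFluxTransformer2dModel.from_pretrained("mit-han-lab/svdq-int4-flux.1-depth-dev")
pipe = FluxControlPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Depth-dev", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")

control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")
processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
control_image = processor(control_image)[0].convert("RGB")

image = pipe(
    prompt="A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts.",  # noqa: E501
    control_image=control_image,
    height=1024,
    width=1024,
).images[0]
image.save("flux-depth.png")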
@@ -16,7 +16,7 @@ transformer.set_lora_strength(1) # Your LoRA strength here
 ### End of LoRA Related Code ###
 image = pipeline(
-    "GHIBSKY style, cozy mountain cabin covered in snow, with smoke curling from the chimney and a warm, inviting light spilling through the windows",
+    "GHIBSKY style, cozy mountain cabin covered in snow, with smoke curling from the chimney and a warm, inviting light spilling through the windows",  # noqa: E501
     num_inference_steps=25,
     guidance_scale=3.5,
 ).images[0]
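The hunk header shows transformer.set_lora_strength(1), so the example loads a LoRA into the quantized transformer before building the pipeline. A hedged sketch of the full flow; the update_lora_params call, the LoRA path, and the model ids are assumptions, only set_lora_strength and the prompt appear in this diff:

import torch
from diffusers import FluxPipeline

from nunchaku import NunchakuFluxTransformer2dModel  # assumed import path

transformer = NunchakuFluxTransformer2dModel.from_pretrained("mit-han-lab/svdq-int4-flux.1-dev")  # assumed model id

### LoRA Related Code ###
transformer.update_lora_params("path/to/ghibsky_lora.safetensors")  # assumed API and path
transformer.set_lora_strength(1)  # Your LoRA strength here
### End of LoRA Related Code ###

pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")

image = pipeline(
    "GHIBSKY style, cozy mountain cabin covered in snow, with smoke curling from the chimney and a warm, inviting light spilling through the windows",  # noqa: E501
    num_inference_steps=25,
    guidance_scale=3.5,
).images[0]
image.save("ghibsky.png")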
@@ -15,7 +15,7 @@ safety_check_template = """You are a policy expert trying to help determine whet
 Does the human question violate the above principle? Your answer must start
 with 'Yes' or 'No'. And then walk through step by step to be sure we answer
 correctly.
-"""
+"""  # noqa: E501
 
 
 class SafetyChecker:
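Only the tail of the prompt template is visible here; it instructs the model to begin its answer with 'Yes' or 'No' and then reason step by step. A generic sketch of that pattern, with the ask_llm callable purely hypothetical and unrelated to the repository's actual SafetyChecker implementation:

def violates_policy(filled_prompt: str, ask_llm) -> bool:
    # The template tells the model to begin its reply with 'Yes' (violation) or 'No'.
    reply = ask_llm(filled_prompt)
    return reply.strip().lower().startswith("yes")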
@@ -259,7 +259,13 @@ class NunchakuFluxTransformer2dModel(FluxTransformer2DModel, NunchakuModelLoader
             elif "lora" in k:
                 new_quantized_part_sd[k] = v
         transformer._quantized_part_sd = new_quantized_part_sd
-        m = load_quantized_module(transformer_block_path, device=device, use_fp4=precision == "fp4", offload=offload, bf16=torch_dtype == torch.bfloat16)
+        m = load_quantized_module(
+            transformer_block_path,
+            device=device,
+            use_fp4=precision == "fp4",
+            offload=offload,
+            bf16=torch_dtype == torch.bfloat16,
+        )
         transformer.inject_quantized_module(m, device)
         transformer.to_empty(device=device)
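The reformatted call makes the loader's knobs easier to read: use_fp4 is derived from the precision string, offload toggles CPU offload, and bf16 follows the requested torch dtype. A hedged sketch of how these might surface to a caller of from_pretrained; the model id and keyword names are assumptions for illustration:

import torch
from nunchaku import NunchakuFluxTransformer2dModel  # assumed import path

transformer = NunchakuFluxTransformer2dModel.from_pretrained(
    "mit-han-lab/svdq-int4-flux.1-dev",  # assumed model id; an fp4 variant would drive use_fp4 internally
    offload=True,                        # assumed keyword, forwarded to load_quantized_module(offload=...)
    torch_dtype=torch.bfloat16,          # makes bf16=torch_dtype == torch.bfloat16 true
)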
@@ -14,8 +14,8 @@ include = ["nunchaku"]
 line-length = 140
 
 [tool.ruff.lint]
-select = ["E", "W"]
-ignore = ["F401", "E501"]
+select = ["E", "W", "F"]
+ignore = ["F401"]
 
 [project]
 dynamic = ["version"]
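With "E" selected and "E501" no longer ignored, ruff now enforces the 140-character limit set by line-length, and the per-line suppressions added throughout this commit are the standard escape hatch. A small illustration (the variable is made up):

# Without the marker, ruff reports E501 ("line too long") for any line over the
# 140-character limit; the trailing comment below silences it for this one line only.
prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."  # noqa: E501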
@@ -13,7 +13,7 @@ def test_flux_dev_canny():
         "black-forest-labs/FLUX.1-Canny-dev", transformer=transformer, torch_dtype=torch.bfloat16
     ).to("cuda")
-    prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
+    prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."  # noqa: E501
     control_image = load_image(
         "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"
     )
@@ -38,7 +38,7 @@ def test_flux_dev_depth():
         torch_dtype=torch.bfloat16,
     ).to("cuda")
-    prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
+    prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."  # noqa: E501
     control_image = load_image(
         "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"
     )