Commit cdf5a19b authored by Hyunsung Lee, committed by Zhekai Zhang

Add formatting rule and format fix

parent 60a0d6d7

name: Lint
on: [push, pull_request]
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'
          python-version: '3.10'
      - name: Install dependencies
        run: pip install ruff yapf
        run: pip install ruff
      - name: Run ruff check
        run: ruff check nunchaku comfyui examples tests --output-format github
      - name: Run yapf check
        run: yapf --diff nunchaku comfyui examples tests --recursive

repos:
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    rev: v0.11.2
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.3.2
    hooks:
      - id: ruff
        args: ["check", "nunchaku", "comfyui", "examples", "tests", "--output-format", "github"]
  - repo: https://github.com/google/yapf
    rev: v0.43.0
    hooks:
      - id: yapf
        args: ["--diff", "--recursive", "nunchaku", "comfyui", "examples", "tests"]
        args: ["check", "--output-format", "github"]
        pass_filenames: true
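
Note: the workflow and the pre-commit hooks run the same linters over the nunchaku, comfyui, examples and tests directories. Below is a minimal sketch of how the CI ruff step can be reproduced locally; it assumes ruff is already installed in the active environment (pip install ruff) and it is an illustration, not a file from this repository. With the hook configuration above, running pre-commit install followed by pre-commit run --all-files gives the same coverage through pre-commit itself.

# Sketch: reproduce the CI "Run ruff check" step locally.
# Assumes `ruff` is installed in the current environment (pip install ruff).
import subprocess
import sys

TARGETS = ["nunchaku", "comfyui", "examples", "tests"]  # same paths the workflow checks

result = subprocess.run(["ruff", "check", *TARGETS], check=False)
sys.exit(result.returncode)  # non-zero exit code when ruff reports violations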

@@ -42,7 +42,7 @@ class SVDQuantFluxLoraLoader:
"base_model_name": (
base_model_paths,
{
-"tooltip": "If the lora format is SVDQuant, this field has no use. Otherwise, the base model's state dictionary is required for converting the LoRA weights to SVDQuant."
+"tooltip": "If the lora format is SVDQuant, this field has no use. Otherwise, the base model's state dictionary is required for converting the LoRA weights to SVDQuant." # noqa: E501
},
),
"lora_strength": (

@@ -58,7 +58,7 @@ class SVDQuantFluxLoraLoader:
"save_converted_lora": (
["disable", "enable"],
{
-"tooltip": "If enabled, the converted LoRA will be saved as a .safetensors file in the save directory of your LoRA file."
+"tooltip": "If enabled, the converted LoRA will be saved as a .safetensors file in the save directory of your LoRA file." # noqa: E501
},
),
}

@@ -89,7 +89,7 @@ class SVDQuantFluxDiTLoader:
["auto", "enable", "disable"],
{
"default": "auto",
-"tooltip": "Whether to enable CPU offload for the transformer model. 'auto' will enable it if the GPU memory is less than 14G.",
+"tooltip": "Whether to enable CPU offload for the transformer model. 'auto' will enable it if the GPU memory is less than 14G.", # noqa: E501
},
),
"device_id": (
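
The cpu_offload tooltip above says that 'auto' turns CPU offload on when the GPU has less than 14G of memory. The snippet below is only a sketch of such a heuristic in plain PyTorch, with the 14 GiB threshold taken from the tooltip; it is not the loader's actual implementation.

import torch

def should_offload(setting: str, device_id: int = 0, threshold_gib: float = 14.0) -> bool:
    # Sketch of an "auto" CPU-offload heuristic; not the node's real code.
    if setting == "enable":
        return True
    if setting == "disable":
        return False
    # "auto": offload only when the GPU's total memory falls below the ~14 GiB
    # threshold, mirroring the behaviour described in the tooltip.
    total_gib = torch.cuda.get_device_properties(device_id).total_memory / 1024**3
    return threshold_gib > total_gib  # i.e. total memory is below the threshold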

@@ -23,7 +23,7 @@ processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf
control_image = processor(control_image)[0].convert("RGB")
image = pipe(
prompt="A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts.",
prompt="A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts.", # noqa: E501
control_image=control_image,
height=1024,
width=1024,

@@ -16,7 +16,7 @@ transformer.set_lora_strength(1) # Your LoRA strength here
### End of LoRA Related Code ###
image = pipeline(
-"GHIBSKY style, cozy mountain cabin covered in snow, with smoke curling from the chimney and a warm, inviting light spilling through the windows",
+"GHIBSKY style, cozy mountain cabin covered in snow, with smoke curling from the chimney and a warm, inviting light spilling through the windows", # noqa: E501
num_inference_steps=25,
guidance_scale=3.5,
).images[0]

@@ -15,7 +15,7 @@ safety_check_template = """You are a policy expert trying to help determine whet
Does the human question violate the above principle? Your answer must start
with 'Yes' or 'No'. And then walk through step by step to be sure we answer
correctly.
-"""
+""" # noqa: E501
class SafetyChecker:

@@ -259,7 +259,13 @@ class NunchakuFluxTransformer2dModel(FluxTransformer2DModel, NunchakuModelLoader
elif "lora" in k:
new_quantized_part_sd[k] = v
transformer._quantized_part_sd = new_quantized_part_sd
-m = load_quantized_module(transformer_block_path, device=device, use_fp4=precision == "fp4", offload=offload, bf16=torch_dtype == torch.bfloat16)
+m = load_quantized_module(
+    transformer_block_path,
+    device=device,
+    use_fp4=precision == "fp4",
+    offload=offload,
+    bf16=torch_dtype == torch.bfloat16,
+)
transformer.inject_quantized_module(m, device)
transformer.to_empty(device=device)

@@ -14,8 +14,8 @@ include = ["nunchaku"]
line-length = 140
[tool.ruff.lint]
-select = ["E", "W"]
-ignore = ["F401", "E501"]
+select = ["E", "W", "F"]
+ignore = ["F401"]
[project]
dynamic = ["version"]
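
With this change, E501 (lines longer than the 140-character line-length) is enforced by ruff check, while F401 (unused import) stays ignored even though the F rules are now selected; that is why the long tooltip and prompt strings touched in this commit gain # noqa: E501 comments. The snippet below is illustrative only and not part of the repository.

import os  # unused import: F401 would fire under the "F" rules, but it remains ignored

# This line is well over the 140-character limit, so with E501 enforced it needs
# an explicit suppression, exactly as this commit adds:
TOOLTIP = "If the lora format is SVDQuant, this field has no use. Otherwise, the base model's state dictionary is required for converting the LoRA weights to SVDQuant."  # noqa: E501

# The alternative is to wrap the string so every line stays under the limit:
TOOLTIP_WRAPPED = (
    "If the lora format is SVDQuant, this field has no use. Otherwise, the base model's "
    "state dictionary is required for converting the LoRA weights to SVDQuant."
)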

@@ -13,7 +13,7 @@ def test_flux_dev_canny():
"black-forest-labs/FLUX.1-Canny-dev", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")
prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts." # noqa: E501
control_image = load_image(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"
)

@@ -38,7 +38,7 @@ def test_flux_dev_depth():
torch_dtype=torch.bfloat16,
).to("cuda")
prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts." # noqa: E501
control_image = load_image(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"
)