Unverified Commit 86294d3c authored by co63oc, committed by GitHub

Fix typos in docs and comments (#11416)



* Fix typos in docs and comments

* Apply style fixes

---------
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
parent d70f8ee1
......@@ -567,7 +567,7 @@ def parse_args(input_args=None):
type=str,
default=None,
help=(
-'The transformer modules to apply LoRA training on. Please specify the layers in a comma seperated. E.g. - "to_k,to_q,to_v,to_out.0" will result in lora training of attention layers only'
+'The transformer modules to apply LoRA training on. Please specify the layers in a comma separated. E.g. - "to_k,to_q,to_v,to_out.0" will result in lora training of attention layers only'
),
)
......
......@@ -596,7 +596,7 @@ def parse_args(input_args=None):
type=str,
default=None,
help=(
-'The transformer modules to apply LoRA training on. Please specify the layers in a comma seperated. E.g. - "to_k,to_q,to_v" will result in lora training of attention layers only'
+'The transformer modules to apply LoRA training on. Please specify the layers in a comma separated. E.g. - "to_k,to_q,to_v" will result in lora training of attention layers only'
),
)
......
......@@ -514,7 +514,7 @@ def parse_args(input_args=None):
type=str,
default=None,
help=(
-'The transformer modules to apply LoRA training on. Please specify the layers in a comma seperated. E.g. - "to_k,to_q,to_v" will result in lora training of attention layers only'
+'The transformer modules to apply LoRA training on. Please specify the layers in a comma separated. E.g. - "to_k,to_q,to_v" will result in lora training of attention layers only'
),
)
......
......@@ -513,7 +513,7 @@ def parse_args(input_args=None):
type=str,
default=None,
help=(
-'The transformer modules to apply LoRA training on. Please specify the layers in a comma seperated. E.g. - "to_k,to_q,to_v" will result in lora training of attention layers only'
+'The transformer modules to apply LoRA training on. Please specify the layers in a comma separated. E.g. - "to_k,to_q,to_v" will result in lora training of attention layers only'
),
)
......
......@@ -576,7 +576,7 @@ def parse_args(input_args=None):
type=str,
default=None,
help=(
"The transformer block layers to apply LoRA training on. Please specify the layers in a comma seperated string."
"The transformer block layers to apply LoRA training on. Please specify the layers in a comma separated string."
"For examples refer to https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_SD3.md"
),
)
......@@ -585,7 +585,7 @@ def parse_args(input_args=None):
type=str,
default=None,
help=(
"The transformer blocks to apply LoRA training on. Please specify the block numbers in a comma seperated manner."
"The transformer blocks to apply LoRA training on. Please specify the block numbers in a comma separated manner."
'E.g. - "--lora_blocks 12,30" will result in lora training of transformer blocks 12 and 30. For more examples refer to https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_SD3.md'
),
)
......
......@@ -664,7 +664,7 @@ def parse_args(input_args=None):
action="store_true",
default=False,
help=(
"Wether to train a DoRA as proposed in- DoRA: Weight-Decomposed Low-Rank Adaptation https://arxiv.org/abs/2402.09353. "
"Whether to train a DoRA as proposed in- DoRA: Weight-Decomposed Low-Rank Adaptation https://arxiv.org/abs/2402.09353. "
"Note: to use DoRA you need to install peft from main, `pip install git+https://github.com/huggingface/peft.git`"
),
)
......
......@@ -329,7 +329,7 @@ def parse_args(input_args=None):
type=str,
default=None,
help=(
-'The transformer modules to apply LoRA training on. Please specify the layers in a comma seperated. E.g. - "to_k,to_q,to_v,to_out.0" will result in lora training of attention layers only'
+'The transformer modules to apply LoRA training on. Please specify the layers in a comma separated. E.g. - "to_k,to_q,to_v,to_out.0" will result in lora training of attention layers only'
),
)
parser.add_argument(
......
......@@ -400,7 +400,7 @@ def main():
image_encoder.requires_grad_(False)
-# For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
+# For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
......
......@@ -1147,7 +1147,7 @@ def main(args):
tracker_config = dict(vars(args))
accelerator.init_trackers(args.tracker_project_name, config=tracker_config)
-# Function for unwraping if torch.compile() was used in accelerate.
+# Function for unwrapping if torch.compile() was used in accelerate.
def unwrap_model(model):
model = accelerator.unwrap_model(model)
model = model._orig_mod if is_compiled_module(model) else model
......
......@@ -69,7 +69,7 @@ accelerate launch --config_file=accelerate.yaml \
--seed="0"
```
-We can direcly pass a quantized checkpoint path, too:
+We can directly pass a quantized checkpoint path, too:
```diff
+ --quantized_model_path="hf-internal-testing/flux.1-dev-nf4-pkg"
......
......@@ -13,7 +13,7 @@ args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brightly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
......
......@@ -80,7 +80,7 @@ export INT8_MODEL_NAME="./int8_model"
python text2images.py \
--pretrained_model_name_or_path=$INT8_MODEL_NAME \
--caption "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings." \
--caption "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brightly buildings." \
--images_num 4
```
......
......@@ -664,7 +664,7 @@ class PixArtAlphaControlnetPipeline(DiffusionPipeline):
# &amp
caption = re.sub(r"&amp", "", caption)
-# ip adresses:
+# ip addresses:
caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
# article ids:
......
......@@ -612,7 +612,7 @@ def main():
# See Section 3.1. of the paper.
max_length = 120
-# For mixed precision training we cast all non-trainable weigths (vae, text_encoder) to half-precision
+# For mixed precision training we cast all non-trainable weights (vae, text_encoder) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
......
......@@ -120,11 +120,11 @@ if __name__ == "__main__":
parser.add_argument("--schnell", action="store_true", help="run flux schnell instead of dev")
parser.add_argument("--width", type=int, default=1024, help="width of the image to generate")
parser.add_argument("--height", type=int, default=1024, help="height of the image to generate")
parser.add_argument("--guidance", type=float, default=3.5, help="gauidance strentgh for dev")
parser.add_argument("--guidance", type=float, default=3.5, help="guidance strength for dev")
parser.add_argument("--seed", type=int, default=None, help="seed for inference")
parser.add_argument("--profile", action="store_true", help="enable profiling")
parser.add_argument("--profile-duration", type=int, default=10000, help="duration for profiling in msec.")
parser.add_argument("--itters", type=int, default=15, help="tiems to run inference and get avg time in sec.")
parser.add_argument("--itters", type=int, default=15, help="items to run inference and get avg time in sec.")
args = parser.parse_args()
if args.schnell:
ckpt_id = "black-forest-labs/FLUX.1-schnell"
......
......@@ -759,7 +759,7 @@ def main(args):
unet, text_encoder, optimizer, train_dataloader
)
-# For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
+# For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
......
......@@ -661,7 +661,7 @@ def parse_args(input_args=None):
action="store_true",
default=False,
help=(
"Wether to train a DoRA as proposed in- DoRA: Weight-Decomposed Low-Rank Adaptation https://arxiv.org/abs/2402.09353. "
"Whether to train a DoRA as proposed in- DoRA: Weight-Decomposed Low-Rank Adaptation https://arxiv.org/abs/2402.09353. "
"Note: to use DoRA you need to install peft from main, `pip install git+https://github.com/huggingface/peft.git`"
),
)
......
......@@ -789,7 +789,7 @@ def main():
text_encoder, optimizer, train_dataloader, lr_scheduler
)
-# For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
+# For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
......
......@@ -814,7 +814,7 @@ def main():
text_encoder_1, text_encoder_2, optimizer, train_dataloader, lr_scheduler
)
-# For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
+# For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
......
......@@ -220,7 +220,7 @@ def convert_flux_transformer_checkpoint_to_diffusers(
f"double_blocks.{i}.txt_attn.proj.bias"
)
-# single transfomer blocks
+# single transformer blocks
for i in range(num_single_layers):
block_prefix = f"single_transformer_blocks.{i}."
# norm.linear <- single_blocks.0.modulation.lin
......