Unverified Commit f4977abc authored by M. Tolga Cangöz, committed by GitHub

Fix typos (#7181)

* Fix typos

* Fix typos

* Fix typos and update documentation in lora.md
parent df8559a7
@@ -77,7 +77,7 @@ accelerate config default
 Or if your environment doesn't support an interactive shell, like a notebook, you can use:
-```bash
+```py
 from accelerate.utils import write_basic_config
 write_basic_config()
@@ -170,7 +170,7 @@ Aside from setting up the LoRA layers, the training script is more or less the s
 Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀
-Let's train on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset to generate our yown Pokémon. Set the environment variables `MODEL_NAME` and `DATASET_NAME` to the model and dataset respectively. You should also specify where to save the model in `OUTPUT_DIR`, and the name of the model to save to on the Hub with `HUB_MODEL_ID`. The script creates and saves the following files to your repository:
+Let's train on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset to generate our own Pokémon. Set the environment variables `MODEL_NAME` and `DATASET_NAME` to the model and dataset respectively. You should also specify where to save the model in `OUTPUT_DIR`, and the name of the model to save to on the Hub with `HUB_MODEL_ID`. The script creates and saves the following files to your repository:
 - saved model checkpoints
 - `pytorch_lora_weights.safetensors` (the trained LoRA weights)
...
@@ -128,7 +128,7 @@ seed = 2023
 # The values come from
 # https://github.com/lyn-rgb/FreeU_Diffusers#video-pipelines
 pipe.enable_freeu(b1=1.2, b2=1.4, s1=0.9, s2=0.2)
-video_frames = pipe(prompt, height=320, width=576, num_frames=30, generator=torch.manual_seed(seed)).frames
+video_frames = pipe(prompt, height=320, width=576, num_frames=30, generator=torch.manual_seed(seed)).frames[0]
 export_to_video(video_frames, "astronaut_rides_horse.mp4")
 ```
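A note on the `.frames` → `.frames[0]` change that recurs throughout this diff: the video pipelines return `frames` batched per prompt, so a single video has to be indexed out before it is passed to `export_to_video`. A minimal end-to-end sketch (the model ID follows the diffusers text-to-video docs; adjust to your setup):

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

pipe = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

output = pipe("An astronaut riding a horse", num_frames=16)
# `output.frames` is batched: one list of frames per prompt.
video = output.frames[0]  # frames for the first (and only) prompt
export_to_video(video, "astronaut.mp4")
```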
...
@@ -3493,7 +3493,7 @@ output_frames = pipe(
 mask_end=0.8,
 mask_strength=0.5,
 negative_prompt='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-).frames
+).frames[0]
 export_to_video(
 output_frames, "/path/to/video.mp4", 5)
...
@@ -127,7 +127,7 @@ class ConfigMixin:
 """The only reason we overwrite `getattr` here is to gracefully deprecate accessing
 config attributes directly. See https://github.com/huggingface/diffusers/pull/3129
-Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:
+This function is mostly copied from PyTorch's __getattr__ overwrite:
 https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module
 """
@@ -533,7 +533,7 @@ class ConfigMixin:
 f"{cls.config_name} configuration file."
 )
-# 5. Give nice info if config attributes are initiliazed to default because they have not been passed
+# 5. Give nice info if config attributes are initialized to default because they have not been passed
 passed_keys = set(init_dict.keys())
 if len(expected_keys - passed_keys) > 0:
 logger.info(
...
@@ -332,7 +332,7 @@ class VaeImageProcessor(ConfigMixin):
 image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
 height: int,
 width: int,
-resize_mode: str = "default", # "defalt", "fill", "crop"
+resize_mode: str = "default", # "default", "fill", "crop"
 ) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]:
 """
 Resize image.
@@ -448,7 +448,7 @@ class VaeImageProcessor(ConfigMixin):
 image: PipelineImageInput,
 height: Optional[int] = None,
 width: Optional[int] = None,
-resize_mode: str = "default", # "defalt", "fill", "crop"
+resize_mode: str = "default", # "default", "fill", "crop"
 crops_coords: Optional[Tuple[int, int, int, int]] = None,
 ) -> torch.Tensor:
 """
@@ -479,7 +479,7 @@ class VaeImageProcessor(ConfigMixin):
 if isinstance(image, torch.Tensor):
 # if image is a pytorch tensor could have 2 possible shapes:
 # 1. batch x height x width: we should insert the channel dimension at position 1
-# 2. channnel x height x width: we should insert batch dimension at position 0,
+# 2. channel x height x width: we should insert batch dimension at position 0,
 # however, since both channel and batch dimension has same size 1, it is same to insert at position 1
 # for simplicity, we insert a dimension of size 1 at position 1 for both cases
 image = image.unsqueeze(1)
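The insight in this comment, that a single `unsqueeze(1)` covers both ambiguous 3-D layouts, is easy to verify in isolation (standalone PyTorch, independent of diffusers):

```py
import torch

# Case 1: batch x height x width (single-channel images, channel axis missing)
batched = torch.rand(4, 64, 64)
print(batched.unsqueeze(1).shape)  # torch.Size([4, 1, 64, 64])

# Case 2: channel x height x width (single image, batch axis missing).
# Here channel == 1, so inserting at position 0 or 1 gives the same shape.
single = torch.rand(1, 64, 64)
print(single.unsqueeze(1).shape)  # torch.Size([1, 1, 64, 64])
```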
...
@@ -343,7 +343,7 @@ class AutoPipelineForText2Image(ConfigMixin):
 pipeline linked to the pipeline class using pattern matching on pipeline class name.
 All the modules the pipeline contains will be used to initialize the new pipeline without reallocating
-additional memoery.
+additional memory.
 The pipeline is set in evaluation mode (`model.eval()`) by default.
@@ -616,7 +616,7 @@ class AutoPipelineForImage2Image(ConfigMixin):
 image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class name.
 All the modules the pipeline contains will be used to initialize the new pipeline without reallocating
-additional memoery.
+additional memory.
 The pipeline is set in evaluation mode (`model.eval()`) by default.
@@ -892,7 +892,7 @@ class AutoPipelineForInpainting(ConfigMixin):
 pipeline linked to the pipeline class using pattern matching on pipeline class name.
 All the modules the pipeline class contain will be used to initialize the new pipeline without reallocating
-additional memoery.
+additional memory.
 The pipeline is set in evaluation mode (`model.eval()`) by default.
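All three docstrings describe the same `from_pipe` behavior: a new task-specific pipeline is assembled from an existing pipeline's already-loaded components instead of loading them again. A brief usage sketch (the checkpoint name is illustrative):

```py
from diffusers import AutoPipelineForImage2Image, AutoPipelineForText2Image

pipe_t2i = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5"
)

# Reuse the already-loaded modules for an image-to-image pipeline;
# no additional memory is allocated for the shared components.
pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe_t2i)
```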
...
@@ -52,7 +52,7 @@ EXAMPLE_DOC_STRING = """
 >>> pipe.enable_model_cpu_offload()
 >>> prompt = "Spiderman is surfing"
->>> video_frames = pipe(prompt).frames
+>>> video_frames = pipe(prompt).frames[0]
 >>> video_path = export_to_video(video_frames)
 >>> video_path
 ```
...
@@ -52,7 +52,7 @@ EXAMPLE_DOC_STRING = """
 >>> pipe.to("cuda")
 >>> prompt = "spiderman running in the desert"
->>> video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
+>>> video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames[0]
 >>> # safe low-res video
 >>> video_path = export_to_video(video_frames, output_video_path="./video_576_spiderman.mp4")
@@ -73,7 +73,7 @@ EXAMPLE_DOC_STRING = """
 >>> video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames]
 >>> # and denoise it
->>> video_frames = pipe(prompt, video=video, strength=0.6).frames
+>>> video_frames = pipe(prompt, video=video, strength=0.6).frames[0]
 >>> video_path = export_to_video(video_frames, output_video_path="./video_1024_spiderman.mp4")
 >>> video_path
 ```
...