Unverified Commit 86294d3c authored by co63oc, committed by GitHub

Fix typos in docs and comments (#11416)



* Fix typos in docs and comments

* Apply style fixes

---------
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
parent d70f8ee1
@@ -123,7 +123,7 @@ def _preprocess_adapter_image(image, height, width):
             image = torch.cat(image, dim=0)
         else:
             raise ValueError(
-                f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}"
+                f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but receive: {image[0].ndim}"
             )
     return image
......
@@ -121,7 +121,7 @@ def _preprocess_adapter_image(image, height, width):
             image = torch.cat(image, dim=0)
         else:
             raise ValueError(
-                f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}"
+                f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but receive: {image[0].ndim}"
             )
     return image
......
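For context, the two hunks above guard the same preprocessing step in two adapter pipelines: a list of per-image tensors is merged into one 4-D batch, and anything that is neither 3-D nor 4-D triggers the corrected error message. A standalone illustrative sketch of that branch (not the pipelines' exact code; the function name here is made up):

```python
import torch

def stack_image_tensors(images):
    # 3-D tensors (C, H, W) are stacked into a new batch dimension;
    # 4-D tensors (B, C, H, W) are concatenated along the existing one.
    if images[0].ndim == 3:
        return torch.stack(images, dim=0)
    elif images[0].ndim == 4:
        return torch.cat(images, dim=0)
    raise ValueError(
        f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but receive: {images[0].ndim}"
    )

batch = stack_image_tensors([torch.rand(3, 64, 64), torch.rand(3, 64, 64)])
print(batch.shape)  # torch.Size([2, 3, 64, 64])
```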
@@ -140,7 +140,7 @@ class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
             input_ids (`torch.Tensor` of shape `(N, max_seq_len)`):
                 Text tokens to use for inference.
             prefix_embeds (`torch.Tensor` of shape `(N, prefix_length, 768)`):
-                Prefix embedding to preprend to the embedded tokens.
+                Prefix embedding to prepend to the embedded tokens.
             attention_mask (`torch.Tensor` of shape `(N, prefix_length + max_seq_len, 768)`, *optional*):
                 Attention mask for the prefix embedding.
             labels (`torch.Tensor`, *optional*):
......
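The corrected word "prepend" in this docstring amounts to a concatenation along the sequence axis. A shape-only sketch with made-up sizes (this is not the model's forward pass):

```python
import torch

N, prefix_length, max_seq_len, hidden = 2, 10, 77, 768

prefix_embeds = torch.rand(N, prefix_length, hidden)  # learned prefix
token_embeds = torch.rand(N, max_seq_len, hidden)     # embedded input_ids

# The prefix is placed in front of the token embeddings along the sequence
# dimension, so the decoder attends over prefix and text jointly.
hidden_states = torch.cat([prefix_embeds, token_embeds], dim=1)
print(hidden_states.shape)  # torch.Size([2, 87, 768])
```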
@@ -803,7 +803,7 @@ class UniDiffuserPipeline(DiffusionPipeline):
     def _combine(self, img_vae, img_clip):
         r"""
-        Combines a latent iamge img_vae of shape (B, C, H, W) and a CLIP-embedded image img_clip of shape (B, 1,
+        Combines a latent image img_vae of shape (B, C, H, W) and a CLIP-embedded image img_clip of shape (B, 1,
         clip_img_dim) into a single tensor of shape (B, C * H * W + clip_img_dim).
         """
         img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1))
......
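The `_combine` docstring above is essentially flatten-and-concatenate. A minimal standalone illustration of the shape bookkeeping (sizes are arbitrary):

```python
import torch

B, C, H, W, clip_img_dim = 2, 4, 64, 64, 512

img_vae = torch.rand(B, C, H, W)           # VAE latent image
img_clip = torch.rand(B, 1, clip_img_dim)  # CLIP image embedding

# Flatten each tensor to (B, -1) and concatenate along the feature axis.
combined = torch.cat([img_vae.reshape(B, -1), img_clip.reshape(B, -1)], dim=-1)
print(combined.shape)  # torch.Size([2, 16896]) == (B, C * H * W + clip_img_dim)
```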
@@ -199,7 +199,7 @@ class DiffusersQuantizer(ABC):
     def dequantize(self, model):
         """
-        Potentially dequantize the model to retrive the original model, with some loss in accuracy / performance. Note
+        Potentially dequantize the model to retrieve the original model, with some loss in accuracy / performance. Note
         not all quantization schemes support this.
         """
         model = self._dequantize(model)
......
@@ -49,7 +49,7 @@ def _replace_with_bnb_linear(
     """
     Private method that wraps the recursion for module replacement.

-    Returns the converted model and a boolean that indicates if the conversion has been successfull or not.
+    Returns the converted model and a boolean that indicates if the conversion has been successful or not.
     """
     for name, module in model.named_children():
         if current_key_name is None:
@@ -223,7 +223,7 @@ def _dequantize_and_replace(
     performance drop compared to the original model before quantization - use it only for specific usecases such as
     QLoRA adapters merging.

-    Returns the converted model and a boolean that indicates if the conversion has been successfull or not.
+    Returns the converted model and a boolean that indicates if the conversion has been successful or not.
     """
     quant_method = quantization_config.quantization_method()
......
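Both hunks fix the same typo in docstrings describing a recursive module-replacement walk over `named_children()`. A generic sketch of that pattern, without bitsandbytes and with `nn.Identity` as a stand-in replacement class, purely to show the traversal and the returned flag:

```python
import torch.nn as nn

def replace_linear(model: nn.Module, new_cls=nn.Identity) -> tuple[nn.Module, bool]:
    """Recursively swap nn.Linear children and report whether anything was replaced."""
    has_been_replaced = False
    for name, module in model.named_children():
        if isinstance(module, nn.Linear):
            setattr(model, name, new_cls())
            has_been_replaced = True
        else:
            # Recurse into containers (Sequential, ModuleList, custom blocks, ...).
            _, child_replaced = replace_linear(module, new_cls)
            has_been_replaced = has_been_replaced or child_replaced
    return model, has_been_replaced

model = nn.Sequential(nn.Linear(8, 8), nn.Sequential(nn.Linear(8, 4)))
model, replaced = replace_linear(model)
print(replaced)  # True
```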
@@ -49,7 +49,7 @@ class GGUFQuantizer(DiffusersQuantizer):
     def validate_environment(self, *args, **kwargs):
         if not is_accelerate_available() or is_accelerate_version("<", "0.26.0"):
             raise ImportError(
-                "Loading GGUF Parameters requires `accelerate` installed in your enviroment: `pip install 'accelerate>=0.26.0'`"
+                "Loading GGUF Parameters requires `accelerate` installed in your environment: `pip install 'accelerate>=0.26.0'`"
             )
         if not is_gguf_available() or is_gguf_version("<", "0.10.0"):
             raise ImportError(
@@ -82,7 +82,7 @@ class GGUFQuantizer(DiffusersQuantizer):
             inferred_shape = _quant_shape_from_byte_shape(loaded_param_shape, type_size, block_size)
             if inferred_shape != current_param_shape:
                 raise ValueError(
-                    f"{param_name} has an expected quantized shape of: {inferred_shape}, but receieved shape: {loaded_param_shape}"
+                    f"{param_name} has an expected quantized shape of: {inferred_shape}, but received shape: {loaded_param_shape}"
                 )
             return True
......
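The second hunk checks a shape inferred from the on-disk byte layout against the parameter's logical shape. Roughly, GGUF packs `block_size` values into `type_size` bytes, so only the last axis is rescaled; the sketch below mirrors that idea and is not the library's exact helper (the Q4_0 numbers are the standard 32 values per 18-byte block):

```python
def quant_shape_from_byte_shape(byte_shape, type_size, block_size):
    # Only the last axis is packed, so convert it from bytes back to elements.
    *leading, last = byte_shape
    return (*leading, last // type_size * block_size)

print(quant_shape_from_byte_shape((4096, 1728), type_size=18, block_size=32))
# (4096, 3072)
```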
@@ -262,7 +262,7 @@ class TorchAoHfQuantizer(DiffusersQuantizer):
         **kwargs,
     ):
         r"""
-        Each nn.Linear layer that needs to be quantized is processsed here. First, we set the value the weight tensor,
+        Each nn.Linear layer that needs to be quantized is processed here. First, we set the value the weight tensor,
         then we move it to the target device. Finally, we quantize the module.
         """
         module, tensor_name = get_module_from_name(model, param_name)
......
@@ -218,7 +218,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
         if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero":
             raise ValueError(
-                f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please chooose `sigma_min` instead."
+                f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead."
             )

         # setable values
......
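The corrected message tells the caller to pick `sigma_min` whenever the algorithm type is not one of the dpmsolver++ variants. A hedged configuration sketch that satisfies this check; the specific argument values are illustrative defaults, not a recommendation:

```python
from diffusers import DPMSolverSinglestepScheduler

# `final_sigmas_type="zero"` is only allowed for "dpmsolver++" / "sde-dpmsolver++",
# so other algorithm types fall back to "sigma_min".
scheduler = DPMSolverSinglestepScheduler(
    algorithm_type="dpmsolver",
    final_sigmas_type="sigma_min",
)
```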
@@ -155,7 +155,7 @@ def export_to_video(
         bitrate:
             Set a constant bitrate for the video encoding. Default is None causing `quality` parameter to be used instead.
             Better quality videos with smaller file sizes will result from using the `quality` variable bitrate parameter
-            rather than specifiying a fixed bitrate with this parameter.
+            rather than specifying a fixed bitrate with this parameter.
         macro_block_size:
             Size constraint for video. Width and height, must be divisible by this number. If not divisible by this number
......
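A hedged usage sketch of the trade-off this docstring describes, leaving `bitrate` unset so the variable-bitrate `quality` knob drives encoding. The frames and output path are placeholders, and the exact keyword set assumed here (`fps`, `quality`, `bitrate`) should be checked against the installed diffusers version:

```python
import numpy as np
from diffusers.utils import export_to_video

# Placeholder frames; a pipeline would normally return these.
frames = [np.zeros((256, 256, 3), dtype=np.uint8) for _ in range(16)]

export_to_video(
    frames,
    "output.mp4",
    fps=16,
    quality=9,     # variable bitrate: typically better quality per file size
    bitrate=None,  # leave unset rather than pinning a constant bitrate
)
```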
@@ -153,19 +153,19 @@ def get_peft_kwargs(rank_dict, network_alpha_dict, peft_state_dict, is_unet=True
     r = lora_alpha = list(rank_dict.values())[0]

     if len(set(rank_dict.values())) > 1:
-        # get the rank occuring the most number of times
+        # get the rank occurring the most number of times
         r = collections.Counter(rank_dict.values()).most_common()[0][0]

-        # for modules with rank different from the most occuring rank, add it to the `rank_pattern`
+        # for modules with rank different from the most occurring rank, add it to the `rank_pattern`
         rank_pattern = dict(filter(lambda x: x[1] != r, rank_dict.items()))
         rank_pattern = {k.split(".lora_B.")[0]: v for k, v in rank_pattern.items()}

     if network_alpha_dict is not None and len(network_alpha_dict) > 0:
         if len(set(network_alpha_dict.values())) > 1:
-            # get the alpha occuring the most number of times
+            # get the alpha occurring the most number of times
             lora_alpha = collections.Counter(network_alpha_dict.values()).most_common()[0][0]

-            # for modules with alpha different from the most occuring alpha, add it to the `alpha_pattern`
+            # for modules with alpha different from the most occurring alpha, add it to the `alpha_pattern`
             alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, network_alpha_dict.items()))

             if is_unet:
                 alpha_pattern = {
......
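A small standalone illustration of the "most common rank wins, outliers go into `rank_pattern`" logic these comments describe; the rank dictionary below is made up:

```python
import collections

rank_dict = {
    "down.lora_B.weight": 4,
    "mid.lora_B.weight": 4,
    "up.lora_B.weight": 8,
}

# The most frequently occurring rank becomes the default `r`.
r = collections.Counter(rank_dict.values()).most_common()[0][0]

# Modules whose rank differs from the default are recorded separately.
rank_pattern = {k.split(".lora_B.")[0]: v for k, v in rank_dict.items() if v != r}

print(r)             # 4
print(rank_pattern)  # {'up': 8}
```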
@@ -219,7 +219,7 @@ def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs):
         kwargs (`dict`, *args*):
             Additional arguments to pass to the method.

-            - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended
+            - **adapter_name**: For example, in case of PEFT, some keys will be prepended
               with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in
               `get_peft_model_state_dict` method:
               https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92
@@ -290,7 +290,7 @@ def convert_state_dict_to_kohya(state_dict, original_type=None, **kwargs):
         kwargs (`dict`, *args*):
             Additional arguments to pass to the method.

-            - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended
+            - **adapter_name**: For example, in case of PEFT, some keys will be prepended
               with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in
               `get_peft_model_state_dict` method:
               https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92
......
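The `adapter_name` note in both docstrings boils down to key renaming: PEFT stores adapter-specific segments in the key names, and conversion wants the adapter-agnostic layout. An illustrative sketch with made-up keys and dummy values, not the converters' actual code:

```python
adapter_name = "my_adapter"
peft_state_dict = {
    "base_model.model.to_q.lora_A.my_adapter.weight": 1,
    "base_model.model.to_q.lora_B.my_adapter.weight": 2,
}

# Drop the adapter-specific segment so keys match the layout that the
# downstream conversion expects.
cleaned = {k.replace(f".{adapter_name}", ""): v for k, v in peft_state_dict.items()}
print(list(cleaned))
# ['base_model.model.to_q.lora_A.weight', 'base_model.model.to_q.lora_B.weight']
```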
@@ -61,7 +61,7 @@ def randn_tensor(
             logger.info(
                 f"The passed generator was created on 'cpu' even though a tensor on {device} was expected."
                 f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably"
-                f" slighly speed up this function by passing a generator that was created on the {device} device."
+                f" slightly speed up this function by passing a generator that was created on the {device} device."
             )
         elif gen_device_type != device.type and gen_device_type == "cuda":
             raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.")
......
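The log message above suggests creating the generator on the same device as the requested tensor to skip the create-on-CPU-then-move path. A minimal sketch, guarded so it also runs without a GPU; the shape is arbitrary:

```python
import torch
from diffusers.utils.torch_utils import randn_tensor

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# A generator created directly on the target device avoids the slower
# "create on cpu, then move" fallback that the logger warns about.
generator = torch.Generator(device=device).manual_seed(0)
latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device)
print(latents.device)
```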
@@ -67,7 +67,7 @@ class VideoProcessor(VaeImageProcessor):
         # ensure the input is a list of videos:
         # - if it is a batch of videos (5d torch.Tensor or np.ndarray), it is converted to a list of videos (a list of 4d torch.Tensor or np.ndarray)
-        # - if it is a single video, it is convereted to a list of one video.
+        # - if it is a single video, it is converted to a list of one video.
         if isinstance(video, (np.ndarray, torch.Tensor)) and video.ndim == 5:
             video = list(video)
         elif isinstance(video, list) and is_valid_image(video[0]) or is_valid_image_imagelist(video):
......
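A tiny illustration of the batch-to-list conversion the corrected comment describes, using plain tensors rather than the processor itself; the dimension layout is an assumption for the example:

```python
import torch

batch = torch.rand(2, 16, 3, 64, 64)  # (batch, frames, channels, height, width)

# A 5-D batch of videos becomes a list of 4-D videos; a single 4-D video
# would instead be wrapped as a one-element list.
videos = list(batch)
print(len(videos), videos[0].shape)  # 2 torch.Size([16, 3, 64, 64])
```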
@@ -187,6 +187,6 @@ class WuerstchenDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCase)
     def test_float16_inference(self):
         super().test_float16_inference()

-    @unittest.skip("Test not supoorted.")
+    @unittest.skip("Test not supported.")
     def test_encode_prompt_works_in_isolation(self):
         super().test_encode_prompt_works_in_isolation()