Unverified Commit 5ad7f170 authored by fzyzcjy, committed by GitHub

Super tiny fix 12 typos about "with with" (#29926)

* with with

* style
parent 43d17c18
@@ -24,7 +24,7 @@ This model was contributed by [Connor Henderson](https://huggingface.co/connor-h
 ## 🤗 Model Architecture
-FastSpeech2's general structure with a Mel-spectrogram decoder was implemented, and the traditional transformer blocks were replaced with with conformer blocks as done in the ESPnet library.
+FastSpeech2's general structure with a Mel-spectrogram decoder was implemented, and the traditional transformer blocks were replaced with conformer blocks as done in the ESPnet library.
 #### FastSpeech2 Model Architecture
 ![FastSpeech2 Model Architecture](https://www.microsoft.com/en-us/research/uploads/prod/2021/04/fastspeech2-1.png)
...
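For context on the sentence fixed above: the model couples FastSpeech2's Mel-spectrogram decoder with conformer blocks. A minimal text-to-spectrogram sketch, assuming the `espnet/fastspeech2_conformer` checkpoint from the model's docs (the checkpoint name and output key are assumptions, not part of this diff):

```python
# Minimal sketch, assuming the `espnet/fastspeech2_conformer` checkpoint.
from transformers import FastSpeech2ConformerModel, FastSpeech2ConformerTokenizer

tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
inputs = tokenizer("Hello, my dog is cute.", return_tensors="pt")

model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer")
output_dict = model(inputs["input_ids"], return_dict=True)

# The model predicts a Mel-spectrogram; a vocoder such as
# FastSpeech2ConformerHifiGan can then turn it into a waveform.
spectrogram = output_dict["spectrogram"]
print(spectrogram.shape)
```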
@@ -400,7 +400,7 @@ class AlignVisionExpansionLayer(nn.Module):
         return hidden_states

-# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer with with EfficientNet->AlignVision
+# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer with EfficientNet->AlignVision
 class AlignVisionDepthwiseLayer(nn.Module):
     r"""
     This corresponds to the depthwise convolution phase of each block in the original implementation.
@@ -440,7 +440,7 @@ class AlignVisionDepthwiseLayer(nn.Module):
         return hidden_states

-# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with with EfficientNet->AlignVision
+# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with EfficientNet->AlignVision
 class AlignVisionSqueezeExciteLayer(nn.Module):
     r"""
     This corresponds to the Squeeze and Excitement phase of each block in the original implementation.
...
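The comments fixed above are not plain prose: `# Copied from <source> with Old->New` is a directive for the repository's copy-consistency tooling (`utils/check_copies.py`, run via `make fix-copies`), which re-copies the named source and applies the identifier mapping. A hypothetical sketch of that rename step (the helper below is illustrative only, not the tool's actual code):

```python
# Hypothetical illustration of the "Copied from X with Old->New" rename step;
# the real logic lives in the repository's utils/check_copies.py.
def apply_mapping(source: str, mapping: str) -> str:
    old, new = mapping.split("->")
    return source.replace(old.strip(), new.strip())

copied = '''class EfficientNetDepthwiseLayer(nn.Module):
    r"""
    This corresponds to the depthwise convolution phase of each block in the original implementation.
    """'''

# Produces the AlignVision variant of the copied EfficientNet layer.
print(apply_mapping(copied, "EfficientNet->AlignVision"))
```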
@@ -2093,7 +2093,7 @@ class BartDecoderWrapper(BartPreTrainedModel):
 @add_start_docstrings(
     """
-    BART decoder with with a language modeling head on top (linear layer with weights tied to the input embeddings).
+    BART decoder with a language modeling head on top (linear layer with weights tied to the input embeddings).
     """,
     BART_START_DOCSTRING,
 )
...
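The docstring corrected above decorates `BartForCausalLM`, the decoder-only wrapper. A minimal sketch of its use, assuming the `facebook/bart-base` checkpoint:

```python
# Minimal sketch, assuming the `facebook/bart-base` checkpoint.
from transformers import AutoTokenizer, BartForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
model = BartForCausalLM.from_pretrained("facebook/bart-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
outputs = model(**inputs)

# One logit vector per input position, over the vocabulary; the LM head's
# weights are tied to the input embeddings, as the docstring says.
print(outputs.logits.shape)  # (batch, sequence_length, vocab_size)
```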
@@ -154,8 +154,7 @@ class OpenLlamaConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)
...
@@ -177,8 +177,7 @@ class FalconConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)
...
@@ -199,8 +199,7 @@ class FuyuConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)
...
@@ -167,8 +167,7 @@ class GPTNeoXConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)
...
@@ -179,8 +179,7 @@ class LlamaConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)
...
@@ -151,8 +151,7 @@ class PersimmonConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)
...
@@ -179,8 +179,7 @@ class PhiConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)
...
@@ -168,8 +168,7 @@ class StableLmConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
            )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)
...
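The eight hunks above apply the identical fix to the OpenLlama, Falcon, Fuyu, GPT-NeoX, Llama, Persimmon, Phi, and StableLm configs: the error string loses its doubled "with", and the two string literals are joined on one line (behavior is unchanged, since adjacent string literals merge at compile time). A sketch of what the validation accepts and rejects, using `LlamaConfig` with the `"linear"` scaling type the Llama implementation supports:

```python
from transformers import LlamaConfig

# Accepted: a dict with exactly the two fields `type` and `factor`.
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})

# Rejected: any extra or missing field fails the len(...) != 2 check
# and raises the ValueError whose message this PR corrects.
try:
    LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0, "extra": 1})
except ValueError as err:
    print(err)
```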
@@ -1861,7 +1861,7 @@ class WhisperDecoderWrapper(WhisperPreTrainedModel):
 @add_start_docstrings(
     """
-    Whisper decoder with with a language modeling head on top (linear layer with weights tied to the input embeddings).
+    Whisper decoder with a language modeling head on top (linear layer with weights tied to the input embeddings).
     """,
     WHISPER_START_DOCSTRING,
 )
...
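As with BART, the corrected docstring belongs to the decoder-only wrapper exposed as `WhisperForCausalLM`. A hedged sketch, assuming the `distil-whisper/distil-large-v2` checkpoint that the Whisper docs pair with it as a speculative-decoding assistant:

```python
# Minimal sketch; the checkpoint name is an assumption taken from the
# Whisper documentation's speculative-decoding example.
from transformers import WhisperForCausalLM

assistant_model = WhisperForCausalLM.from_pretrained("distil-whisper/distil-large-v2")

# The wrapper is the Whisper decoder plus an LM head tied to the input
# embeddings; it can be passed as `assistant_model=` to generate() on a
# full WhisperForConditionalGeneration model.
print(sum(p.numel() for p in assistant_model.parameters()))
```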