Unverified Commit 01977466 authored by arfy slowy, committed by GitHub

fix: typo spelling grammar (#13212)

* fix: typo spelling grammar

* fix: make fixup
parent ef83dc4f
@@ -158,7 +158,7 @@ def validate_model_outputs(
     # We flatten potential collection of outputs (i.e. past_keys) to a flat structure
     for name, value in ref_outputs.items():
-        # Overwriting the output name as "present" since it is the name used for the ONNX ouputs
+        # Overwriting the output name as "present" since it is the name used for the ONNX outputs
         # ("past_key_values" being taken for the ONNX inputs)
         if name == "past_key_values":
             name = "present"
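For context, this hunk sits in the loop that reconciles the framework's reference outputs with the ONNX session outputs. A minimal sketch of that rename-and-flatten pass (the helper name and the flattening scheme here are illustrative; the real code delegates flattening to the model's `OnnxConfig`):

```python
def flatten_reference_outputs(ref_outputs):
    """Flatten nested reference outputs into a flat {name: tensor} mapping."""
    flat = {}
    for name, value in ref_outputs.items():
        # ONNX graphs expose cached key/values under "present";
        # "past_key_values" is reserved for the graph inputs.
        if name == "past_key_values":
            name = "present"
        if isinstance(value, (list, tuple)):
            # One entry per layer, e.g. "present.0", "present.1", ...
            for idx, item in enumerate(value):
                flat[f"{name}.{idx}"] = item
        else:
            flat[name] = value
    return flat
```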
@@ -114,7 +114,7 @@ class FeaturesManager:
         Args:
             model: The model to export
-            feature: The name of the feature to check if it is avaiable
+            feature: The name of the feature to check if it is available
         Returns:
             (str) The type of the model (OnnxConfig) The OnnxConfig instance holding the model export properties
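This docstring belongs to `FeaturesManager`'s feature-support check. A typical call, sketched from the public `transformers.onnx` export API of this era (assuming `FeaturesManager.check_supported_model_or_raise` behaves as documented):

```python
from transformers import AutoModel
from transformers.onnx.features import FeaturesManager

model = AutoModel.from_pretrained("bert-base-uncased")

# Returns the model type as a str and the OnnxConfig factory for the feature,
# raising if the (model, feature) pair is not supported for export.
model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(
    model, feature="default"
)
onnx_config = model_onnx_config(model.config)
```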
@@ -1375,7 +1375,7 @@ INIT_TOKENIZER_DOCSTRING = r"""
       high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the
       low-level being the :obj:`short-cut-names` of the pretrained models with, as associated values, the
       :obj:`url` to the associated pretrained vocabulary file.
-    - **max_model_input_sizes** (:obj:`Dict[str, Optinal[int]]`) -- A dictionary with, as keys, the
+    - **max_model_input_sizes** (:obj:`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the
       :obj:`short-cut-names` of the pretrained models, and as associated values, the maximum length of the sequence
       inputs of this model, or :obj:`None` if the model has no maximum input size.
     - **pretrained_init_configuration** (:obj:`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
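To make the documented attribute concrete: each tokenizer class exposes this mapping directly, e.g. (the value shown is the standard BERT limit):

```python
from transformers import BertTokenizer

# Maps checkpoint short-cut names to their maximum sequence length (or None).
print(BertTokenizer.max_model_input_sizes["bert-base-uncased"])  # 512
```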
@@ -1785,7 +1785,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
                 config = AutoConfig.from_pretrained(pretrained_model_name_or_path)
                 config_tokenizer_class = config.tokenizer_class
             except (OSError, ValueError, KeyError):
-                # skip if an error occured.
+                # skip if an error occurred.
                 config = None
         if config_tokenizer_class is None:
             # Third attempt. If we have not yet found the original type of the tokenizer,
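The pattern in this hunk is a best-effort lookup: read the tokenizer class from the model config, but treat any lookup failure as "unknown" and fall through to the next resolution strategy. As a standalone sketch (the helper itself is hypothetical):

```python
from transformers import AutoConfig

def resolve_tokenizer_class_name(pretrained_model_name_or_path):
    """Best-effort read of the tokenizer class recorded in a model config."""
    try:
        config = AutoConfig.from_pretrained(pretrained_model_name_or_path)
        return config.tokenizer_class  # may itself be None
    except (OSError, ValueError, KeyError):
        # Skip if an error occurred: a missing or malformed config just means
        # the caller moves on to its next way of finding the tokenizer class.
        return None
```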
@@ -707,7 +707,7 @@ class PreTrainedTokenizerFast(PreTrainedTokenizerBase):
             special_token_full = getattr(self, f"_{token}")
             if isinstance(special_token_full, AddedToken):
-                # Create an added token with the same paramters except the content
+                # Create an added token with the same parameters except the content
                 kwargs[token] = AddedToken(
                     special_token,
                     single_word=special_token_full.single_word,
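The surrounding code swaps a special token's text while keeping its matching flags intact. A minimal sketch with `tokenizers.AddedToken` (the token strings are made up; the flag names follow the `tokenizers` library):

```python
from tokenizers import AddedToken

old = AddedToken("<mask>", single_word=False, lstrip=True, rstrip=False)

# New content, same matching behaviour: every parameter except the text
# is copied over from the existing token.
new = AddedToken(
    "<new_mask>",
    single_word=old.single_word,
    lstrip=old.lstrip,
    rstrip=old.rstrip,
)
```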