chenpangpang / transformers

Commit bd9d5126 (Unverified)
Authored Jan 07, 2023 by Kaito Sugimoto; committed by GitHub on Jan 07, 2023

fix typo (#21042)
Parent: f93c90d2
Showing 6 changed files with 8 additions and 8 deletions
src/transformers/modeling_tf_utils.py (+1, -1)
src/transformers/models/jukebox/modeling_jukebox.py (+1, -1)
src/transformers/pipelines/automatic_speech_recognition.py (+1, -1)
src/transformers/pipelines/base.py (+2, -2)
src/transformers/pipelines/text_classification.py (+2, -2)
src/transformers/pipelines/zero_shot_image_classification.py (+1, -1)
src/transformers/modeling_tf_utils.py

@@ -777,7 +777,7 @@ def load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatch
     Args:
         model (`tf.keras.models.Model`): Model in which the weights are loaded
-        model_layer_map (`Dict`): A dictionnary mapping the layer name to the index of the layer in the model.
+        model_layer_map (`Dict`): A dictionary mapping the layer name to the index of the layer in the model.
         resolved_archive_file (`str`): Path to the checkpoint file from which the weights will be loaded
         ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether to ignore the mismatched keys
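For context, the argument whose description is fixed here, `model_layer_map`, is just a plain dict from layer name to the layer's position in the model. A minimal sketch of building such a map, using a toy Keras model rather than anything from this commit:

import tensorflow as tf

# Toy stand-in for a `tf.keras.models.Model`; the layer names are arbitrary examples.
model = tf.keras.Sequential(
    [tf.keras.layers.Dense(4, name="dense_in"), tf.keras.layers.Dense(2, name="dense_out")]
)

# The `model_layer_map` described in the docstring: layer name -> index of the layer in the model.
model_layer_map = {layer.name: index for index, layer in enumerate(model.layers)}
print(model_layer_map)  # {'dense_in': 0, 'dense_out': 1}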
src/transformers/models/jukebox/modeling_jukebox.py

@@ -1972,7 +1972,7 @@ class JukeboxPrior(PreTrainedModel):
     def prior_preprocess(self, tokens, conds):
         """
-        Shifts the input tokens to account for the dictionnary merge. The embed_dim_shift give by how much the music
+        Shifts the input tokens to account for the dictionary merge. The embed_dim_shift give by how much the music
         tokens should be shifted by. It is equal to `lyric_vocab_size`.
         """
         batch_size = tokens[0].shape[0]
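The sentence touched here describes a token-offset trick: lyric and music tokens share one merged embedding table, so music token ids are shifted by `lyric_vocab_size` before lookup. A standalone sketch of that idea with toy sizes (not Jukebox's actual code):

import torch

lyric_vocab_size = 80     # toy value; the real shift equals the lyric vocabulary size
music_vocab_size = 2048   # toy value

lyric_tokens = torch.tensor([[3, 17, 42]])     # ids in [0, lyric_vocab_size)
music_tokens = torch.tensor([[5, 900, 1500]])  # ids in [0, music_vocab_size)

# After the "dictionary merge", music ids are shifted so both token types index
# disjoint rows of a single shared embedding table.
merged = torch.cat([lyric_tokens, music_tokens + lyric_vocab_size], dim=-1)

embed = torch.nn.Embedding(lyric_vocab_size + music_vocab_size, 16)
hidden = embed(merged)  # shape: (1, 6, 16)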
src/transformers/pipelines/automatic_speech_recognition.py

@@ -306,7 +306,7 @@ class AutomaticSpeechRecognitionPipeline(ChunkPipeline):
             # better integration
             if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
                 raise ValueError(
-                    "When passing a dictionnary to AutomaticSpeechRecognitionPipeline, the dict needs to contain a "
+                    "When passing a dictionary to AutomaticSpeechRecognitionPipeline, the dict needs to contain a "
                     '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, '
                     "containing the sampling_rate associated with that array"
                 )
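The reworded error message spells out the dict input contract: the dict must carry the audio under a "raw" (or "array") key as a numpy array, plus a "sampling_rate" key. A minimal sketch of a valid call; the checkpoint name is only an example and not part of this commit:

import numpy as np
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

# One second of silence at 16 kHz stands in for real audio.
audio = np.zeros(16000, dtype=np.float32)

# Both keys are required; dropping "sampling_rate" triggers the ValueError shown above.
result = asr({"raw": audio, "sampling_rate": 16000})
print(result["text"])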
src/transformers/pipelines/base.py

@@ -945,7 +945,7 @@ class Pipeline(_ScikitCompat):
     @abstractmethod
     def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> Dict[str, GenericTensor]:
         """
-        Preprocess will take the `input_` of a specific pipeline and return a dictionnary of everything necessary for
+        Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for
         `_forward` to run properly. It should contain at least one tensor, but might have arbitrary other items.
         """
         raise NotImplementedError("preprocess not implemented")

@@ -953,7 +953,7 @@ class Pipeline(_ScikitCompat):
     @abstractmethod
     def _forward(self, input_tensors: Dict[str, GenericTensor], **forward_parameters: Dict) -> ModelOutput:
         """
-        _forward will receive the prepared dictionnary from `preprocess` and run it on the model. This method might
+        _forward will receive the prepared dictionary from `preprocess` and run it on the model. This method might
         involve the GPU or the CPU and should be agnostic to it. Isolating this function is the reason for `preprocess`
         and `postprocess` to exist, so that the hot path, this method generally can run as fast as possible.
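Both docstrings fixed above describe the contract between the pipeline hooks: `preprocess` turns raw input into a dictionary holding at least one tensor, and `_forward` feeds that dictionary to the model while staying agnostic about device. A bare-bones sketch of a custom subclass honouring that contract; the class name and behaviour are illustrative, not from the library:

from transformers import Pipeline

class ArgmaxTextPipeline(Pipeline):
    """Toy pipeline: tokenize text, run the model, return the argmax over logits."""

    def _sanitize_parameters(self, **kwargs):
        # No extra parameters routed to preprocess/_forward/postprocess in this sketch.
        return {}, {}, {}

    def preprocess(self, input_, **preprocess_parameters):
        # Return the dictionary `_forward` needs; it must contain at least one tensor.
        return self.tokenizer(input_, return_tensors=self.framework)

    def _forward(self, input_tensors, **forward_parameters):
        # Device placement is handled by the base class around this call.
        return self.model(**input_tensors)

    def postprocess(self, model_outputs, **postprocess_parameters):
        # Turn the ModelOutput into something user-facing.
        return model_outputs.logits.argmax(-1).tolist()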
src/transformers/pipelines/text_classification.py

@@ -125,7 +125,7 @@ class TextClassificationPipeline(Pipeline):
         Args:
             args (`str` or `List[str]` or `Dict[str]`, or `List[Dict[str]]`):
                 One or several texts to classify. In order to use text pairs for your classification, you can send a
-                dictionnary containing `{"text", "text_pair"}` keys, or a list of those.
+                dictionary containing `{"text", "text_pair"}` keys, or a list of those.
             top_k (`int`, *optional*, defaults to `1`):
                 How many results to return.
             function_to_apply (`str`, *optional*, defaults to `"default"`):

@@ -174,7 +174,7 @@ class TextClassificationPipeline(Pipeline):
             # This is likely an invalid usage of the pipeline attempting to pass text pairs.
             raise ValueError(
                 "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
-                ' dictionnary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
+                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
             )
         return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
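Both fixed strings point at the same input convention: a text pair is passed as a dict with "text" and "text_pair" keys, or a list of such dicts. A short usage sketch; the NLI checkpoint name is an example, not part of this commit:

from transformers import pipeline

classifier = pipeline("text-classification", model="roberta-large-mnli")

# A single text pair goes in as the documented dictionary...
single = classifier({"text": "A soccer game with multiple males playing.",
                     "text_pair": "Some men are playing a sport."})

# ...and several pairs go in as a list of those dictionaries.
batch = classifier([
    {"text": "A man inspects the uniform.", "text_pair": "The man is sleeping."},
    {"text": "Two kids are hugging.", "text_pair": "The children are embracing."},
])
print(single, batch)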
src/transformers/pipelines/zero_shot_image_classification.py

@@ -89,7 +89,7 @@ class ZeroShotImageClassificationPipeline(ChunkPipeline):
                 logits_per_image
         Return:
-            A list of dictionaries containing result, one dictionnary per proposed label. The dictionaries contain the
+            A list of dictionaries containing result, one dictionary per proposed label. The dictionaries contain the
             following keys:
             - **label** (`str`) -- The label identified by the model. It is one of the suggested `candidate_label`.
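The fixed sentence describes the return shape: one dictionary per candidate label. A minimal usage sketch; the checkpoint name and image URL are examples, not part of this commit:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")

results = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",  # example image
    candidate_labels=["cat", "dog", "car"],
)

# One dictionary per proposed label, each carrying at least the "label" key.
for entry in results:
    print(entry["label"], entry)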