transformers · Commit e513c16e (Unverified)

Fix syntax for class references (#14644)

Authored Dec 06, 2021 by Sylvain Gugger, committed by GitHub on Dec 06, 2021
Parent: e9688875
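The change is mechanical: docstrings that cross-reference a class were using the generic Sphinx ``:obj:`` role, and the commit switches those references to the class-specific ``:class:`` role so the documentation builder treats the target as a class reference rather than a generic object. A short sketch of the pattern (illustrative, not a literal diff line):

```python
# Illustrative sketch of the markup fix this commit applies across 11 files:
# class cross-references in docstrings move from the generic Sphinx `:obj:`
# role to the class-specific `:class:` role.

before = "This class returns a :obj:`~transformers.LogitsProcessorList` list object"
after = "This class returns a :class:`~transformers.LogitsProcessorList` list object"

# `:obj:` remains in use for literals and values, e.g. :obj:`int` or
# :obj:`"drop_rows_to_fit"`, as seen in the unchanged context lines below.
```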
Showing 11 changed files with 25 additions and 25 deletions (+25 −25)
src/transformers/generation_flax_utils.py                   +4 −4
src/transformers/generation_utils.py                        +4 −4
src/transformers/modeling_tf_utils.py                       +1 −1
src/transformers/models/tapas/tokenization_tapas.py         +1 −1
src/transformers/pipelines/__init__.py                      +4 −4
src/transformers/pipelines/audio_classification.py          +1 −1
src/transformers/pipelines/automatic_speech_recognition.py  +4 −4
src/transformers/pipelines/base.py                          +2 −2
src/transformers/pipelines/feature_extraction.py            +2 −2
src/transformers/pipelines/zero_shot_classification.py      +1 −1
src/transformers/trainer.py                                 +1 −1
src/transformers/generation_flax_utils.py

@@ -326,8 +326,8 @@ class FlaxGenerationMixin:
         self, top_k: int = None, top_p: float = None, temperature: float = None
     ) -> FlaxLogitsProcessorList:
         """
-        This class returns a :obj:`~transformers.FlaxLogitsProcessorList` list object that contains all relevant
-        :obj:`~transformers.FlaxLogitsWarper` instances used for multinomial sampling.
+        This class returns a :class:`~transformers.FlaxLogitsProcessorList` list object that contains all relevant
+        :class:`~transformers.FlaxLogitsWarper` instances used for multinomial sampling.
         """
         # init warp parameters

@@ -358,8 +358,8 @@ class FlaxGenerationMixin:
         forced_eos_token_id: int,
     ) -> FlaxLogitsProcessorList:
         """
-        This class returns a :obj:`~transformers.FlaxLogitsProcessorList` list object that contains all relevant
-        :obj:`~transformers.FlaxLogitsProcessor` instances used to modify the scores of the language model head.
+        This class returns a :class:`~transformers.FlaxLogitsProcessorList` list object that contains all relevant
+        :class:`~transformers.FlaxLogitsProcessor` instances used to modify the scores of the language model head.
         """
         processors = FlaxLogitsProcessorList()
src/transformers/generation_utils.py

@@ -535,8 +535,8 @@ class GenerationMixin:
         self, top_k: int = None, top_p: float = None, temperature: float = None, num_beams: int = None
     ) -> LogitsProcessorList:
         """
-        This class returns a :obj:`~transformers.LogitsProcessorList` list object that contains all relevant
-        :obj:`~transformers.LogitsWarper` instances used for multinomial sampling.
+        This class returns a :class:`~transformers.LogitsProcessorList` list object that contains all relevant
+        :class:`~transformers.LogitsWarper` instances used for multinomial sampling.
         """
         # init warp parameters

@@ -575,8 +575,8 @@ class GenerationMixin:
         remove_invalid_values: bool,
     ) -> LogitsProcessorList:
         """
-        This class returns a :obj:`~transformers.LogitsProcessorList` list object that contains all relevant
-        :obj:`~transformers.LogitsProcessor` instances used to modify the scores of the language model head.
+        This class returns a :class:`~transformers.LogitsProcessorList` list object that contains all relevant
+        :class:`~transformers.LogitsProcessor` instances used to modify the scores of the language model head.
         """
         processors = LogitsProcessorList()
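For orientation, the docstrings above describe the list-of-warpers pattern used during multinomial sampling. A minimal, illustrative sketch (not part of this commit; the dummy tensors and parameter values are invented for the example):

```python
# Sketch of a LogitsProcessorList holding the LogitsWarper instances that
# are applied to next-token scores during multinomial sampling.
import torch
from transformers import (
    LogitsProcessorList,
    TemperatureLogitsWarper,
    TopKLogitsWarper,
    TopPLogitsWarper,
)

warpers = LogitsProcessorList(
    [
        TemperatureLogitsWarper(temperature=0.7),
        TopKLogitsWarper(top_k=50),
        TopPLogitsWarper(top_p=0.9),
    ]
)

input_ids = torch.tensor([[101]])  # dummy prompt ids, shape (1, 1)
scores = torch.randn(1, 30522)     # dummy next-token logits, shape (1, vocab)
warped_scores = warpers(input_ids, scores)  # warpers applied in list order
```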
src/transformers/modeling_tf_utils.py

@@ -737,7 +737,7 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, Pu
         Prepare the output of the saved model. Each model must implement this function.

         Args:
-            output (:obj:`~transformers.TFBaseModelOutput`):
+            output (:class:`~transformers.TFBaseModelOutput`):
                 The output returned by the model.
         """
         raise NotImplementedError
src/transformers/models/tapas/tokenization_tapas.py

@@ -1277,7 +1277,7 @@ class TapasTokenizer(PreTrainedTokenizer):
                 Total number of table columns
             max_length (:obj:`int`):
                 Total maximum length.
-            truncation_strategy (:obj:`str` or :obj:`~transformers.TapasTruncationStrategy`):
+            truncation_strategy (:obj:`str` or :class:`~transformers.TapasTruncationStrategy`):
                 Truncation strategy to use. Seeing as this method should only be called when truncating, the only
                 available strategy is the :obj:`"drop_rows_to_fit"` strategy.
src/transformers/pipelines/__init__.py

@@ -372,13 +372,13 @@ def pipeline(
             - :obj:`"summarization"`: will return a :class:`~transformers.SummarizationPipeline`:.
             - :obj:`"zero-shot-classification"`: will return a :class:`~transformers.ZeroShotClassificationPipeline`:.
-        model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`):
+        model (:obj:`str` or :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`, `optional`):
             The model that will be used by the pipeline to make predictions. This can be a model identifier or an
             actual instance of a pretrained model inheriting from :class:`~transformers.PreTrainedModel` (for PyTorch)
             or :class:`~transformers.TFPreTrainedModel` (for TensorFlow).

             If not provided, the default for the :obj:`task` will be loaded.
-        config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`):
+        config (:obj:`str` or :class:`~transformers.PretrainedConfig`, `optional`):
             The configuration that will be used by the pipeline to instantiate the model. This can be a model
             identifier or an actual pretrained model configuration inheriting from
             :class:`~transformers.PretrainedConfig`.

@@ -386,7 +386,7 @@ def pipeline(
             If not provided, the default configuration file for the requested model will be used. That means that if
             :obj:`model` is given, its default configuration will be used. However, if :obj:`model` is not supplied,
             this :obj:`task`'s default model's config is used instead.
-        tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`):
+        tokenizer (:obj:`str` or :class:`~transformers.PreTrainedTokenizer`, `optional`):
             The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
             identifier or an actual pretrained tokenizer inheriting from :class:`~transformers.PreTrainedTokenizer`.

@@ -394,7 +394,7 @@ def pipeline(
             :obj:`model` is not specified or not a string, then the default tokenizer for :obj:`config` is loaded (if
             it is a string). However, if :obj:`config` is also not given or not a string, then the default tokenizer
             for the given :obj:`task` will be loaded.
-        feature_extractor (:obj:`str` or :obj:`~transformers.PreTrainedFeatureExtractor`, `optional`):
+        feature_extractor (:obj:`str` or :class:`~transformers.PreTrainedFeatureExtractor`, `optional`):
             The feature extractor that will be used by the pipeline to encode data for the model. This can be a model
             identifier or an actual pretrained feature extractor inheriting from
             :class:`~transformers.PreTrainedFeatureExtractor`.
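As a minimal usage sketch of the `pipeline()` parameters documented above (the checkpoint name is illustrative; any compatible model identifier, or an already-instantiated model/tokenizer pair, works the same way):

```python
# Sketch: `model` and `tokenizer` may each be a string identifier or an
# actual instantiated object, as the docstring above describes.
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

model_id = "distilbert-base-uncased-finetuned-sst-2-english"  # illustrative checkpoint
model = AutoModelForSequenceClassification.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
print(classifier("This commit cleans up the docs nicely."))
```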
src/transformers/pipelines/audio_classification.py

@@ -93,7 +93,7 @@ class AudioClassificationPipeline(Pipeline):
         **kwargs,
     ):
         """
-        Classify the sequence(s) given as inputs. See the :obj:`~transformers.AutomaticSpeechRecognitionPipeline`
+        Classify the sequence(s) given as inputs. See the :class:`~transformers.AutomaticSpeechRecognitionPipeline`
         documentation for more information.

         Args:
src/transformers/pipelines/automatic_speech_recognition.py

@@ -77,13 +77,13 @@ class AutomaticSpeechRecognitionPipeline(Pipeline):
     def __init__(self, feature_extractor: Union["SequenceFeatureExtractor", str], *args, **kwargs):
         """
         Arguments:
-            feature_extractor (:obj:`~transformers.SequenceFeatureExtractor`):
+            feature_extractor (:class:`~transformers.SequenceFeatureExtractor`):
                 The feature extractor that will be used by the pipeline to encode waveform for the model.
-            model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
+            model (:class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
                 The model that will be used by the pipeline to make predictions. This needs to be a model inheriting
                 from :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel`
                 for TensorFlow.
-            tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
+            tokenizer (:class:`~transformers.PreTrainedTokenizer`):
                 The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
                 :class:`~transformers.PreTrainedTokenizer`.
             modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):

@@ -114,7 +114,7 @@ class AutomaticSpeechRecognitionPipeline(Pipeline):
         **kwargs,
     ):
         """
-        Classify the sequence(s) given as inputs. See the :obj:`~transformers.AutomaticSpeechRecognitionPipeline`
+        Classify the sequence(s) given as inputs. See the :class:`~transformers.AutomaticSpeechRecognitionPipeline`
         documentation for more information.

         Args:
src/transformers/pipelines/base.py

@@ -644,11 +644,11 @@ class _ScikitCompat(ABC):
 PIPELINE_INIT_ARGS = r"""
     Arguments:
-        model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
+        model (:class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
             The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
             :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
             TensorFlow.
-        tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
+        tokenizer (:class:`~transformers.PreTrainedTokenizer`):
             The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
             :class:`~transformers.PreTrainedTokenizer`.
         modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
src/transformers/pipelines/feature_extraction.py

@@ -16,11 +16,11 @@ class FeatureExtractionPipeline(Pipeline):
     `huggingface.co/models <https://huggingface.co/models>`__.

     Arguments:
-        model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
+        model (:class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
             The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
             :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
             TensorFlow.
-        tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
+        tokenizer (:class:`~transformers.PreTrainedTokenizer`):
             The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
             :class:`~transformers.PreTrainedTokenizer`.
         modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
src/transformers/pipelines/zero_shot_classification.py

@@ -154,7 +154,7 @@ class ZeroShotClassificationPipeline(Pipeline):
         **kwargs,
     ):
         """
-        Classify the sequence(s) given as inputs. See the :obj:`~transformers.ZeroShotClassificationPipeline`
+        Classify the sequence(s) given as inputs. See the :class:`~transformers.ZeroShotClassificationPipeline`
         documentation for more information.

         Args:
src/transformers/trainer.py

@@ -239,7 +239,7 @@ class Trainer:
         compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
             The function that will be used to compute metrics at evaluation. Must take a
             :class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
-        callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
+        callbacks (List of :class:`~transformers.TrainerCallback`, `optional`):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed in :doc:`here <callback>`.
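As a hedged illustration of the `callbacks` argument documented above (the callback class and its behavior are invented for the example):

```python
# Sketch: a custom TrainerCallback appended to the Trainer's default callbacks.
from transformers import TrainerCallback


class LogEpochCallback(TrainerCallback):  # hypothetical callback, for illustration
    def on_epoch_end(self, args, state, control, **kwargs):
        print(f"finished epoch {state.epoch}")


# Passed at construction time, e.g.:
# trainer = Trainer(model=model, args=training_args, callbacks=[LogEpochCallback()])
```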