chenpangpang / transformers — Commit da72ac6e (unverified)
Authored Jul 17, 2021 by Sylvain Gugger; committed via GitHub on Jul 17, 2021

Fix push_to_hub docstring and make it appear in doc (#12770)
Parent: 08d609bf

Showing 9 changed files with 70 additions and 29 deletions.
docs/source/main_classes/configuration.rst   +1   -0
docs/source/main_classes/model.rst           +3   -0
docs/source/main_classes/tokenizer.rst       +4   -8
src/transformers/configuration_utils.py      +15  -1
src/transformers/file_utils.py               +16  -20
src/transformers/modeling_flax_utils.py      +7   -0
src/transformers/modeling_tf_utils.py        +8   -0
src/transformers/modeling_utils.py           +8   -0
src/transformers/tokenization_utils_base.py  +8   -0
docs/source/main_classes/configuration.rst

@@ -22,4 +22,5 @@ PretrainedConfig
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 .. autoclass:: transformers.PretrainedConfig
+    :special-members: push_to_hub
     :members:
docs/source/main_classes/model.rst

@@ -35,6 +35,7 @@ PreTrainedModel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 .. autoclass:: transformers.PreTrainedModel
+    :special-members: push_to_hub
     :members:

@@ -80,6 +81,7 @@ TFPreTrainedModel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 .. autoclass:: transformers.TFPreTrainedModel
+    :special-members: push_to_hub
     :members:

@@ -94,6 +96,7 @@ FlaxPreTrainedModel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 .. autoclass:: transformers.FlaxPreTrainedModel
+    :special-members: push_to_hub
     :members:
docs/source/main_classes/tokenizer.rst

@@ -53,10 +53,8 @@ PreTrainedTokenizer
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 .. autoclass:: transformers.PreTrainedTokenizer
-    :special-members: __call__
-    :members: batch_decode, convert_ids_to_tokens, convert_tokens_to_ids, convert_tokens_to_string, decode, encode,
-        get_added_vocab, get_special_tokens_mask, num_special_tokens_to_add, prepare_for_tokenization, tokenize,
-        vocab_size
+    :special-members: __call__, batch_decode, decode, encode, push_to_hub
+    :members:

 PreTrainedTokenizerFast

@@ -68,10 +66,8 @@ loaded very simply into 🤗 transformers. Take a look at the :doc:`Using tokeni
 <../fast_tokenizers>` page to understand how this is done.

 .. autoclass:: transformers.PreTrainedTokenizerFast
-    :special-members: __call__
-    :members: batch_decode, convert_ids_to_tokens, convert_tokens_to_ids, convert_tokens_to_string, decode, encode,
-        get_added_vocab, get_special_tokens_mask, num_special_tokens_to_add,
-        set_truncation_and_padding, tokenize, vocab_size
+    :special-members: __call__, batch_decode, decode, encode, push_to_hub
+    :members:

 BatchEncoding
src/transformers/configuration_utils.py

@@ -22,7 +22,15 @@ import os
 from typing import Any, Dict, Tuple, Union

 from . import __version__
-from .file_utils import CONFIG_NAME, PushToHubMixin, cached_path, hf_bucket_url, is_offline_mode, is_remote_url
+from .file_utils import (
+    CONFIG_NAME,
+    PushToHubMixin,
+    cached_path,
+    copy_func,
+    hf_bucket_url,
+    is_offline_mode,
+    is_remote_url,
+)
 from .utils import logging

@@ -729,3 +737,9 @@ class PretrainedConfig(PushToHubMixin):
                 )
             setattr(self, k, v)
+
+
+PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
+PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
+    object="config", object_class="AutoConfig", object_files="configuration file"
+)
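The module-level assignment above is the crux of the change: the method is copied before its `__doc__` is formatted, because `PushToHubMixin.push_to_hub` is one shared function object and formatting its docstring in place would destroy the template for every other subclass. A minimal sketch of the idea; the `copy_func` helper here follows the standard `types.FunctionType` recipe and is my plausible reconstruction, not necessarily the exact body in `file_utils.py`:

    import functools
    import types


    def copy_func(f):
        # Build a new function object sharing the original's code and globals,
        # so its __doc__ can be edited without touching the original.
        g = types.FunctionType(
            f.__code__, f.__globals__, name=f.__name__,
            argdefs=f.__defaults__, closure=f.__closure__,
        )
        g = functools.update_wrapper(g, f)  # copies __doc__, __name__, etc.
        g.__kwdefaults__ = f.__kwdefaults__
        return g


    class Mixin:
        def push_to_hub(self):
            """Upload the {object_files} to the Hub."""


    class Config(Mixin):
        pass


    # Copy first, then format: the mixin's template stays intact.
    Config.push_to_hub = copy_func(Config.push_to_hub)
    Config.push_to_hub.__doc__ = Config.push_to_hub.__doc__.format(
        object_files="configuration file"
    )

    print(Mixin.push_to_hub.__doc__)   # still contains "{object_files}"
    print(Config.push_to_hub.__doc__)  # "Upload the configuration file to the Hub."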
src/transformers/file_utils.py

@@ -1991,14 +1991,14 @@ class PushToHubMixin:
         use_auth_token: Optional[Union[bool, str]] = None,
     ) -> str:
         """
-        Upload model checkpoint or tokenizer files to the 🤗 Model Hub while synchronizing a local clone of the repo in
+        Upload the {object_files} to the 🤗 Model Hub while synchronizing a local clone of the repo in
         :obj:`repo_path_or_name`.

         Parameters:
             repo_path_or_name (:obj:`str`, `optional`):
-                Can either be a repository name for your model or tokenizer in the Hub or a path to a local folder (in
-                which case the repository will have the name of that local folder). If not specified, will default to
-                the name given by :obj:`repo_url` and a local directory with that name will be created.
+                Can either be a repository name for your {object} in the Hub or a path to a local folder (in which
+                case the repository will have the name of that local folder). If not specified, will default to the
+                name given by :obj:`repo_url` and a local directory with that name will be created.
             repo_url (:obj:`str`, `optional`):
                 Specify this in case you want to push to an existing repository in the hub. If unspecified, a new
                 repository will be created in your namespace (unless you specify an :obj:`organization`) with

@@ -2008,11 +2008,9 @@ class PushToHubMixin:
                 the current working directory. This will slow things down if you are making changes in an existing repo
                 since you will need to clone the repo before every push.
             commit_message (:obj:`str`, `optional`):
-                Message to commit while pushing. Will default to :obj:`"add config"`, :obj:`"add tokenizer"` or
-                :obj:`"add model"` depending on the type of the class.
+                Message to commit while pushing. Will default to :obj:`"add {object}"`.
             organization (:obj:`str`, `optional`):
-                Organization in which you want to push your model or tokenizer (you must be a member of this
-                organization).
+                Organization in which you want to push your {object} (you must be a member of this organization).
             private (:obj:`bool`, `optional`):
                 Whether or not the repository created should be private (requires a paying subscription).
             use_auth_token (:obj:`bool` or :obj:`str`, `optional`):

@@ -2022,29 +2020,27 @@
         Returns:
-            The url of the commit of your model in the given repository.
+            :obj:`str`: The url of the commit of your {object} in the given repository.

         Examples::

-            # Upload a model to the Hub:
-            from transformers import AutoModel
+            from transformers import {object_class}

-            model = BertModel.from_pretrained("bert-base-cased")
-            # Fine-tuning code
+            {object} = {object_class}.from_pretrained("bert-base-cased")

-            # Push the model to your namespace with the name "my-finetuned-bert" and have a local clone in the
+            # Push the {object} to your namespace with the name "my-finetuned-bert" and have a local clone in the
             # `my-finetuned-bert` folder.
-            model.push_to_hub("my-finetuned-bert")
+            {object}.push_to_hub("my-finetuned-bert")

-            # Push the model to your namespace with the name "my-finetuned-bert" with no local clone.
-            model.push_to_hub("my-finetuned-bert", use_temp_dir=True)
+            # Push the {object} to your namespace with the name "my-finetuned-bert" with no local clone.
+            {object}.push_to_hub("my-finetuned-bert", use_temp_dir=True)

-            # Push the model to an organization with the name "my-finetuned-bert" and have a local clone in the
+            # Push the {object} to an organization with the name "my-finetuned-bert" and have a local clone in the
             # `my-finetuned-bert` folder.
-            model.push_to_hub("my-finetuned-bert", organization="huggingface")
+            {object}.push_to_hub("my-finetuned-bert", organization="huggingface")

             # Make a change to an existing repo that has been cloned locally in `my-finetuned-bert`.
-            model.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert")
+            {object}.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert")
         """
         if use_temp_dir:
             # Make sure we use the right `repo_name` for the `repo_url` before replacing it.
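With this hunk, the mixin's docstring becomes an ordinary `str.format` template that each class renders with its own terms at import time. A small sketch collecting the substitutions used across the nine files in this commit; the `template` string below is abbreviated for illustration, not the full docstring:

    # (object, object_class, object_files) triples applied at module level in this commit:
    substitutions = {
        "PretrainedConfig": ("config", "AutoConfig", "configuration file"),
        "PreTrainedModel": ("model", "AutoModel", "model checkpoint"),
        "TFPreTrainedModel": ("model", "TFAutoModel", "model checkpoint"),
        "FlaxPreTrainedModel": ("model", "FlaxAutoModel", "model checkpoint"),
        "PreTrainedTokenizerBase": ("tokenizer", "AutoTokenizer", "tokenizer files"),
    }

    template = "Upload the {object_files} to the 🤗 Model Hub. Default commit message: add {object}."

    for cls, (obj, obj_class, obj_files) in substitutions.items():
        # str.format silently ignores unused keyword arguments such as object_class here.
        print(f"{cls}: " + template.format(object=obj, object_class=obj_class, object_files=obj_files))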
src/transformers/modeling_flax_utils.py

@@ -490,6 +490,13 @@ class FlaxPreTrainedModel(PushToHubMixin, FlaxGenerationMixin):
         logger.info(f"Model pushed to the hub in this commit: {url}")


+# To update the docstring, we need to copy the method, otherwise we change the original docstring.
+FlaxPreTrainedModel.push_to_hub = copy_func(FlaxPreTrainedModel.push_to_hub)
+FlaxPreTrainedModel.push_to_hub.__doc__ = FlaxPreTrainedModel.push_to_hub.__doc__.format(
+    object="model", object_class="FlaxAutoModel", object_files="model checkpoint"
+)
+
+
 def overwrite_call_docstring(model_class, docstring):
     # copy __call__ function to be sure docstring is changed only for this function
     model_class.__call__ = copy_func(model_class.__call__)
src/transformers/modeling_tf_utils.py

@@ -36,6 +36,7 @@ from .file_utils import (
     ModelOutput,
     PushToHubMixin,
     cached_path,
+    copy_func,
     hf_bucket_url,
     is_offline_mode,
     is_remote_url,

@@ -1392,6 +1393,13 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
         return model


+# To update the docstring, we need to copy the method, otherwise we change the original docstring.
+TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
+TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
+    object="model", object_class="TFAutoModel", object_files="model checkpoint"
+)
+
+
 class TFConv1D(tf.keras.layers.Layer):
     """
     1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
src/transformers/modeling_utils.py

@@ -38,6 +38,7 @@ from .file_utils import (
     ModelOutput,
     PushToHubMixin,
     cached_path,
+    copy_func,
     hf_bucket_url,
     is_offline_mode,
     is_remote_url,

@@ -1555,6 +1556,13 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin):
         return retrieved_modules


+# To update the docstring, we need to copy the method, otherwise we change the original docstring.
+PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub)
+PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format(
+    object="model", object_class="AutoModel", object_files="model checkpoint"
+)
+
+
 class Conv1D(nn.Module):
     """
     1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
src/transformers/tokenization_utils_base.py

@@ -43,6 +43,7 @@ from .file_utils import (
     _is_torch_device,
     add_end_docstrings,
     cached_path,
+    copy_func,
     hf_bucket_url,
     is_flax_available,
     is_offline_mode,

@@ -3371,3 +3372,10 @@ For a more complete example, see the implementation of `prepare_seq2seq_batch`.
             )
         model_inputs["labels"] = labels["input_ids"]
         return model_inputs
+
+
+# To update the docstring, we need to copy the method, otherwise we change the original docstring.
+PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub)
+PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format(
+    object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files"
+)
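A quick sanity check of the end result; a sketch assuming a transformers build that includes this commit:

    import inspect

    from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizerBase

    # Each class now carries its own rendered copy of the docstring, while the
    # shared PushToHubMixin template itself is left untouched.
    for cls in (PretrainedConfig, PreTrainedModel, PreTrainedTokenizerBase):
        first_line = inspect.getdoc(cls.push_to_hub).splitlines()[0]
        print(f"{cls.__name__}: {first_line}")

    # Expected (roughly): "Upload the configuration file / model checkpoint /
    # tokenizer files to the 🤗 Model Hub while synchronizing a local clone ..."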