Unverified Commit eca77f47 authored by Lysandre Debut, committed by GitHub

Updates the default branch from master to main (#16326)



* Updates the default branch from master to main

* Links from `master` to `main`

* Typo

* Update examples/flax/README.md
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
parent 77321481
@@ -48,7 +48,7 @@ resolver.convert_models(['heb-eng', 'eng-heb'])
 ### Upload converted models
-Since version v3.5.0, the model sharing workflow is switched to git-based system . Refer to [model sharing doc](https://huggingface.co/transformers/master/model_sharing.html#model-sharing-and-uploading) for more details.
+Since version v3.5.0, the model sharing workflow is switched to git-based system . Refer to [model sharing doc](https://huggingface.co/transformers/main/model_sharing.html#model-sharing-and-uploading) for more details.
 To upload all converted models,
......
@@ -13,7 +13,7 @@
 # limitations under the License.
 """
-Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
+Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py
 To create the package for pypi.
@@ -26,10 +26,10 @@ To create the package for pypi.
 4. Commit these changes with the message: "Release: <VERSION>" and push.
-5. Wait for the tests on master to be completed and be green (otherwise revert and fix bugs)
+5. Wait for the tests on main to be completed and be green (otherwise revert and fix bugs)
 6. Add a tag in git to mark the release: "git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi' "
-   Push the tag to git: git push --tags origin master
+   Push the tag to git: git push --tags origin main
 7. Build both the sources and the wheel. Do not change anything in setup.py between
    creating the wheel and the source distribution (obviously).
@@ -60,7 +60,7 @@ To create the package for pypi.
 10. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
 11. Run `make post-release` (or, for a patch release, `make post-patch`). If you were on a branch for the release,
-    you need to go back to master before executing this.
+    you need to go back to main before executing this.
 """
 import os
......
@@ -87,7 +87,7 @@ class GlueDataset(Dataset):
         warnings.warn(
             "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
             "library. You can have a look at this example script for pointers: "
-            "https://github.com/huggingface/transformers/blob/master/examples/pytorch/text-classification/run_glue.py",
+            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
             FutureWarning,
         )
         self.args = args
......
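The deprecation warning above points to `run_glue.py` as the reference for 🤗 Datasets-based preprocessing. As a rough illustration of what that replacement looks like (the "mrpc" task, checkpoint name, and column names below are assumptions, not taken from this diff), a minimal sketch:

```python
# Minimal sketch of GLUE preprocessing with 🤗 Datasets instead of the deprecated GlueDataset.
from datasets import load_dataset
from transformers import AutoTokenizer

raw = load_dataset("glue", "mrpc")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

def preprocess(batch):
    # MRPC pairs two sentences; truncation keeps them within the model's max length.
    return tokenizer(batch["sentence1"], batch["sentence2"], truncation=True)

tokenized = raw.map(preprocess, batched=True)
```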
@@ -53,7 +53,7 @@ class TextDataset(Dataset):
     ):
         warnings.warn(
             DEPRECATION_WARNING.format(
-                "https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_mlm.py"
+                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
             ),
             FutureWarning,
         )
@@ -120,7 +120,7 @@ class LineByLineTextDataset(Dataset):
     def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
         warnings.warn(
             DEPRECATION_WARNING.format(
-                "https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_mlm.py"
+                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
             ),
             FutureWarning,
         )
@@ -153,7 +153,7 @@ class LineByLineWithRefDataset(Dataset):
     def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str):
         warnings.warn(
             DEPRECATION_WARNING.format(
-                "https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_mlm_wwm.py"
+                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py"
             ),
             FutureWarning,
         )
@@ -201,7 +201,7 @@ class LineByLineWithSOPTextDataset(Dataset):
     def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int):
         warnings.warn(
             DEPRECATION_WARNING.format(
-                "https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_mlm.py"
+                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
             ),
             FutureWarning,
         )
@@ -361,7 +361,7 @@ class TextDatasetForNextSentencePrediction(Dataset):
     ):
         warnings.warn(
             DEPRECATION_WARNING.format(
-                "https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_mlm.py"
+                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
             ),
             FutureWarning,
         )
......
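All of the deprecated dataset classes above point to `run_mlm.py` (or `run_mlm_wwm.py`) as the reference. A minimal sketch of the 🤗 Datasets-based equivalent, assuming a plain-text corpus at an illustrative path:

```python
# Minimal sketch of MLM preprocessing with 🤗 Datasets (replaces TextDataset and friends).
# "corpus.txt", the checkpoint name, and max_length are placeholders, not taken from this diff.
from datasets import load_dataset
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

raw = load_dataset("text", data_files={"train": "corpus.txt"})
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, max_length=128)

tokenized = raw.map(tokenize, batched=True, remove_columns=["text"])
# The collator applies dynamic masking at batch time, as run_mlm.py does.
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
```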
@@ -28,7 +28,7 @@ if is_sklearn_available():
 DEPRECATION_WARNING = (
     "This metric will be removed from the library soon, metrics should be handled with the 🤗 Datasets "
     "library. You can have a look at this example script for pointers: "
-    "https://github.com/huggingface/transformers/blob/master/examples/pytorch/text-classification/run_glue.py"
+    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
 )
......
@@ -35,7 +35,7 @@ logger = logging.get_logger(__name__)
 DEPRECATION_WARNING = (
     "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
     "library. You can have a look at this example script for pointers: "
-    "https://github.com/huggingface/transformers/blob/master/examples/pytorch/text-classification/run_glue.py"
+    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
 )
......
@@ -598,7 +598,7 @@ class TFRagModel(TFRagPreTrainedModel):
         question_enc_outputs = self.question_encoder(
             input_ids, attention_mask=attention_mask, return_dict=True, training=training
         )
-        # see https://github.com/huggingface/transformers/blob/master/src/transformers/models/dpr/modeling_tf_dpr.py#L91
+        # see https://github.com/huggingface/transformers/blob/main/src/transformers/models/dpr/modeling_tf_dpr.py#L91
         question_encoder_last_hidden_state = question_enc_outputs[
             0
         ]  # hidden states of question encoder => pooler_output
@@ -748,7 +748,7 @@ class TFRagTokenForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss
     def set_retriever(self, retriever: RagRetriever):
         self.rag.retriever = retriever
-    # Adapted from https://github.com/huggingface/transformers/blob/master/src/transformers/modeling_tf_bart.py
+    # Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_bart.py
     def prepare_inputs_for_generation(
         self,
         decoder_input_ids,
......
@@ -109,7 +109,7 @@ class TFTrainer:
             "The class `TFTrainer` is deprecated and will be removed in version 5 of Transformers. "
             "We recommend using native Keras instead, by calling methods like `fit()` and `predict()` "
             "directly on the model object. Detailed examples of the Keras style can be found in our "
-            "examples at https://github.com/huggingface/transformers/tree/master/examples/tensorflow",
+            "examples at https://github.com/huggingface/transformers/tree/main/examples/tensorflow",
             FutureWarning,
         )
......
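The `TFTrainer` warning recommends plain Keras `fit()`/`predict()` on the model object. A hedged sketch of that workflow, where the checkpoint name and the toy two-example dataset are purely illustrative:

```python
# Minimal Keras-style training sketch, along the lines suggested by the TFTrainer deprecation warning.
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)

# Tiny illustrative dataset; labels ride along in the feature dict so the model's built-in loss is used.
enc = dict(tokenizer(["a great movie", "a dull movie"], padding=True, return_tensors="np"))
enc["labels"] = [1, 0]
ds = tf.data.Dataset.from_tensor_slices(enc).batch(2)

model.compile(optimizer=tf.keras.optimizers.Adam(3e-5))  # no explicit loss: the model computes it internally
model.fit(ds, epochs=1)
preds = model.predict(ds)
```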
@@ -101,16 +101,16 @@ class TrainingArguments:
         do_train (`bool`, *optional*, defaults to `False`):
             Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be used
             by your training/evaluation scripts instead. See the [example
-            scripts](https://github.com/huggingface/transformers/tree/master/examples) for more details.
+            scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
         do_eval (`bool`, *optional*):
             Whether to run evaluation on the validation set or not. Will be set to `True` if `evaluation_strategy` is
             different from `"no"`. This argument is not directly used by [`Trainer`], it's intended to be used by your
             training/evaluation scripts instead. See the [example
-            scripts](https://github.com/huggingface/transformers/tree/master/examples) for more details.
+            scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
         do_predict (`bool`, *optional*, defaults to `False`):
             Whether to run predictions on the test set or not. This argument is not directly used by [`Trainer`], it's
             intended to be used by your training/evaluation scripts instead. See the [example
-            scripts](https://github.com/huggingface/transformers/tree/master/examples) for more details.
+            scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
         evaluation_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`):
             The evaluation strategy to adopt during training. Possible values are:
@@ -385,7 +385,7 @@ class TrainingArguments:
         resume_from_checkpoint (`str`, *optional*):
             The path to a folder with a valid checkpoint for your model. This argument is not directly used by
             [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example
-            scripts](https://github.com/huggingface/transformers/tree/master/examples) for more details.
+            scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
         hub_model_id (`str`, *optional*):
             The name of the repository to keep in sync with the local *output_dir*. It can be a simple model ID in
             which case the model will be pushed in your namespace. Otherwise it should be the whole repository name,
......
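For context on the docstring above: `do_train`/`do_eval`/`do_predict` and `resume_from_checkpoint` are read by the example scripts, not by `Trainer` itself. A hedged usage sketch (output directory and values are illustrative):

```python
# Illustrative TrainingArguments configuration; the example scripts act on do_train/do_eval/do_predict.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./out",           # where checkpoints and logs go
    do_train=True,
    do_eval=True,
    do_predict=False,
    evaluation_strategy="steps",  # evaluate periodically during training
    resume_from_checkpoint=None,  # or a path to a previously saved checkpoint folder
)
print(args.do_train, args.evaluation_strategy)
```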
@@ -46,16 +46,16 @@ class TFTrainingArguments(TrainingArguments):
         do_train (`bool`, *optional*, defaults to `False`):
             Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be used
             by your training/evaluation scripts instead. See the [example
-            scripts](https://github.com/huggingface/transformers/tree/master/examples) for more details.
+            scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
         do_eval (`bool`, *optional*):
             Whether to run evaluation on the validation set or not. Will be set to `True` if `evaluation_strategy` is
             different from `"no"`. This argument is not directly used by [`Trainer`], it's intended to be used by your
             training/evaluation scripts instead. See the [example
-            scripts](https://github.com/huggingface/transformers/tree/master/examples) for more details.
+            scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
         do_predict (`bool`, *optional*, defaults to `False`):
             Whether to run predictions on the test set or not. This argument is not directly used by [`Trainer`], it's
             intended to be used by your training/evaluation scripts instead. See the [example
-            scripts](https://github.com/huggingface/transformers/tree/master/examples) for more details.
+            scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details.
         evaluation_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`):
             The evaluation strategy to adopt during training. Possible values are:
......
@@ -116,5 +116,5 @@ def require_version(requirement: str, hint: Optional[str] = None) -> None:
 def require_version_core(requirement):
     """require_version wrapper which emits a core-specific hint on failure"""
-    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git master"
+    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
     return require_version(requirement, hint)
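For reference, `require_version_core` simply forwards to `require_version` with the core-specific hint shown above. A hedged usage sketch (the requirement strings are illustrative, not taken from this diff):

```python
# Illustrative use of the version guards defined in src/transformers/utils/versions.py.
from transformers.utils.versions import require_version, require_version_core

require_version("datasets>=1.8.0", hint="pip install -U datasets")  # raises if the installed version is too old
require_version_core("tqdm>=4.27")  # same check, but failure messages carry the core-specific hint
```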
@@ -562,12 +562,12 @@ Cookiecutter!
 **Use the Cookiecutter to automatically generate the model's code**
 To begin with head over to the [🤗 Transformers
-templates](https://github.com/huggingface/transformers/tree/master/templates/adding_a_new_model)
+templates](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model)
 to make use of our `cookiecutter` implementation to automatically
 generate all the relevant files for your model. Again, we recommend only
 adding the PyTorch version of the model at first. Make sure you follow
 the instructions of the `README.md` on the [🤗 Transformers
-templates](https://github.com/huggingface/transformers/tree/master/templates/adding_a_new_model)
+templates](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model)
 carefully.
 **Open a Pull Request on the main huggingface/transformers repo**
@@ -580,7 +580,7 @@ Transformers.
 You should do the following:
-1. Create a branch with a descriptive name from your master branch
+1. Create a branch with a descriptive name from your main branch
 ```
 git checkout -b add_[lowercase name of model]
@@ -593,11 +593,11 @@ You should do the following:
 git commit
 ```
-3. Fetch and rebase to current master
+3. Fetch and rebase to current main
 ```
 git fetch upstream
-git rebase upstream/master
+git rebase upstream/main
 ```
 4. Push the changes to your account using:
@@ -617,10 +617,10 @@ You should do the following:
 In the following, whenever you have done some progress, don't forget to
 commit your work and push it to your account so that it shows in the
 pull request. Additionally, you should make sure to update your work
-with the current master from time to time by doing:
+with the current main from time to time by doing:
 git fetch upstream
-git merge upstream/master
+git merge upstream/main
 In general, all questions you might have regarding the model or your
 implementation should be asked in your PR and discussed/solved in the
@@ -703,7 +703,7 @@ similar already existing conversion script for your model.
 [here](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91)
 - If you are porting a model from PyTorch to PyTorch, a good starting
   point might be BART's conversion script
-  [here](https://github.com/huggingface/transformers/blob/master/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py)
+  [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py)
 In the following, we'll quickly explain how PyTorch models store layer
 weights and define layer names. In PyTorch, the name of a layer is
@@ -1122,7 +1122,7 @@ for the community.
 **14. Submit your finished PR**
 You're done programming now and can move to the last step, which is
-getting your PR merged into master. Usually, [name of mentor]
+getting your PR merged into main. Usually, [name of mentor]
 should have helped you already at this point, but it is worth taking
 some time to give your finished PR a nice description and eventually add
 comments to your code, if you want to point out certain design choices
......
@@ -254,7 +254,7 @@ You should have understood the following aspects of BigBird by now:
 - BigBird's self-attention layer is composed of three mechanisms: block sparse (local) self-attention, global self-attention, random self-attention
 - BigBird's block sparse (local) self-attention is different from Longformer's local self-attention. How so? Why does that matter? => Can be deployed on TPU much easier this way
 - BigBird can be implemented for both an encoder-only model **and**
-  for an encoder-decoder model, which means that we can reuse lots of [code from RoBERTa](https://github.com/huggingface/transformers/blob/master/src/transformers/models/roberta/modeling_roberta.py) and [from PEGASUS](https://github.com/huggingface/transformers/blob/master/src/transformers/models/pegasus/modeling_pegasus.py) at a later stage.
+  for an encoder-decoder model, which means that we can reuse lots of [code from RoBERTa](https://github.com/huggingface/transformers/blob/main/src/transformers/models/roberta/modeling_roberta.py) and [from PEGASUS](https://github.com/huggingface/transformers/blob/main/src/transformers/models/pegasus/modeling_pegasus.py) at a later stage.
 If any of the mentioned aspects above are **not** clear to you, now is a great time to talk to Patrick.
@@ -569,12 +569,12 @@ Cookiecutter!
 **Use the Cookiecutter to automatically generate the model's code**
 To begin with head over to the [🤗 Transformers
-templates](https://github.com/huggingface/transformers/tree/master/templates/adding_a_new_model)
+templates](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model)
 to make use of our `cookiecutter` implementation to automatically
 generate all the relevant files for your model. Again, we recommend only
 adding the PyTorch version of the model at first. Make sure you follow
 the instructions of the `README.md` on the [🤗 Transformers
-templates](https://github.com/huggingface/transformers/tree/master/templates/adding_a_new_model)
+templates](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model)
 carefully.
 Since you will first implement the Encoder-only/RoBERTa-like version of BigBird you should
 select the `is_encoder_decoder_model = False` option in the cookiecutter. Also, it is recommended
@@ -591,7 +591,7 @@ Transformers.
 You should do the following:
-1. Create a branch with a descriptive name from your master branch
+1. Create a branch with a descriptive name from your main branch
 ```
 git checkout -b add_big_bird
@@ -604,11 +604,11 @@ You should do the following:
 git commit
 ```
-3. Fetch and rebase to current master
+3. Fetch and rebase to current main
 ```
 git fetch upstream
-git rebase upstream/master
+git rebase upstream/main
 ```
 4. Push the changes to your account using:
@@ -627,10 +627,10 @@ You should do the following:
 In the following, whenever you have done some progress, don't forget to
 commit your work and push it to your account so that it shows in the
 pull request. Additionally, you should make sure to update your work
-with the current master from time to time by doing:
+with the current main from time to time by doing:
 git fetch upstream
-git merge upstream/master
+git merge upstream/main
 In general, all questions you might have regarding the model or your
 implementation should be asked in your PR and discussed/solved in the
@@ -1129,7 +1129,7 @@ for the community.
 **14. Submit your finished PR**
 You're done programming now and can move to the last step, which is
-getting your PR merged into master. Usually, Patrick
+getting your PR merged into main. Usually, Patrick
 should have helped you already at this point, but it is worth taking
 some time to give your finished PR a nice description and eventually add
 comments to your code, if you want to point out certain design choices
......
@@ -12,7 +12,7 @@ This document explains the testing strategy for releasing the new Hugging Face D
 Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/pytorch`. We adjust the branch to the new RC-tag.
 ```
-git+https://github.com/huggingface/transformers.git@v4.5.0.rc0 # install master or adjust ist with vX.X.X for installing version specific-transforms
+git+https://github.com/huggingface/transformers.git@v4.5.0.rc0 # install main or adjust ist with vX.X.X for installing version specific-transforms
 ```
 After we adjusted the `requirements.txt` we can run Amazon SageMaker tests with:
......
-git+https://github.com/huggingface/transformers.git@master # install master or adjust it with vX.X.X for installing version specific transforms
+git+https://github.com/huggingface/transformers.git@main # install main or adjust it with vX.X.X for installing version specific transforms
 datasets==1.8.0
\ No newline at end of file
-git+https://github.com/huggingface/transformers.git@master # install master or adjust ist with vX.X.X for installing version specific transforms
+git+https://github.com/huggingface/transformers.git@main # install main or adjust ist with vX.X.X for installing version specific transforms
\ No newline at end of file
@@ -125,9 +125,9 @@ class CopyCheckTester(unittest.TestCase):
     def test_convert_to_localized_md(self):
         localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
-        md_list = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) and a German version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning."
+        md_list = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning."
         localized_md_list = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
-        converted_md_list_sample = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) and a German version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。\n"
+        converted_md_list_sample = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。\n"
         num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
             md_list, localized_md_list, localized_readme["format_model_list"]
@@ -144,7 +144,7 @@ class CopyCheckTester(unittest.TestCase):
         self.assertTrue(num_models_equal)
         link_changed_md_list = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
-        link_unchanged_md_list = "1. **[ALBERT](https://huggingface.co/transformers/master/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
+        link_unchanged_md_list = "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
         converted_md_list_sample = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
         num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
......
@@ -332,7 +332,7 @@ def convert_to_localized_md(model_list, localized_model_list, format_str):
 def convert_readme_to_index(model_list):
-    model_list = model_list.replace("https://huggingface.co/docs/transformers/master/", "")
+    model_list = model_list.replace("https://huggingface.co/docs/transformers/main/", "")
     return model_list.replace("https://huggingface.co/docs/transformers/", "")
......
@@ -24,7 +24,7 @@ import subprocess
 import sys
-fork_point_sha = subprocess.check_output("git merge-base master HEAD".split()).decode("utf-8")
+fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
 modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
 joined_dirs = "|".join(sys.argv[1:])
......
@@ -67,7 +67,7 @@ def global_version_update(version, patch=False):
 def clean_master_ref_in_model_list():
-    """Replace the links from master doc tp stable doc in the model list of the README."""
+    """Replace the links from main doc tp stable doc in the model list of the README."""
     # If the introduction or the conclusion of the list change, the prompts may need to be updated.
     _start_prompt = "🤗 Transformers currently provides the following architectures"
     _end_prompt = "1. Want to contribute a new model?"
@@ -85,7 +85,7 @@ def clean_master_ref_in_model_list():
     while not lines[index].startswith(_end_prompt):
         if lines[index].startswith("1."):
             lines[index] = lines[index].replace(
-                "https://huggingface.co/docs/transformers/master/model_doc",
+                "https://huggingface.co/docs/transformers/main/model_doc",
                 "https://huggingface.co/docs/transformers/model_doc",
             )
         index += 1
......