chenpangpang / transformers · Commit 40ea9ab2 (Unverified)

Add many missing spaces in adjacent strings (#26751)

Add missing spaces in adjacent strings

Authored Oct 12, 2023 by Tom Aarsen; committed via GitHub on Oct 12, 2023. Parent: 3bc65505

Changes: 154 files in the full commit. This page (1 of 8) shows 20 changed files with 56 additions and 56 deletions (+56 −56).
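The whole commit targets one Python pitfall: adjacent string literals are concatenated at compile time with no separator inserted, so a long message wrapped across several literals needs an explicit space at each junction. A minimal illustration (the literals are shortened from the tokenizer error fixed below):

# Adjacent string literals concatenate with no separator inserted.
jammed = (
    "This is not supported by this script."   # no trailing space...
    "You can do it from another script."      # ...so the sentences run together
)
assert jammed == "This is not supported by this script.You can do it from another script."

fixed = (
    "This is not supported by this script. "  # trailing space added at the junction
    "You can do it from another script."
)
assert fixed == "This is not supported by this script. You can do it from another script."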
examples/pytorch/image-classification/run_image_classification_no_trainer.py (+2 −2)
examples/pytorch/image-pretraining/run_mim.py (+1 −1)
examples/pytorch/image-pretraining/run_mim_no_trainer.py (+2 −2)
examples/pytorch/language-modeling/run_clm.py (+4 −4)
examples/pytorch/language-modeling/run_clm_no_trainer.py (+5 −5)
examples/pytorch/language-modeling/run_mlm.py (+4 −4)
examples/pytorch/language-modeling/run_mlm_no_trainer.py (+5 −5)
examples/pytorch/language-modeling/run_plm.py (+3 −3)
examples/pytorch/multiple-choice/run_swag.py (+2 −2)
examples/pytorch/multiple-choice/run_swag_no_trainer.py (+3 −3)
examples/pytorch/question-answering/run_qa.py (+2 −2)
examples/pytorch/question-answering/run_qa_beam_search.py (+1 −1)
examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py (+1 −1)
examples/pytorch/question-answering/run_qa_no_trainer.py (+4 −4)
examples/pytorch/question-answering/run_seq2seq_qa.py (+4 −4)
examples/pytorch/semantic-segmentation/run_semantic_segmentation.py (+1 −1)
examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py (+2 −2)
examples/pytorch/speech-recognition/run_speech_recognition_ctc.py (+4 −4)
examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py (+4 −4)
examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py (+2 −2)
examples/pytorch/image-classification/run_image_classification_no_trainer.py

@@ -152,7 +152,7 @@ def parse_args():
         default=False,
         help=(
             "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-            "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
             "execute code present on the Hub on your local machine."
         ),
     )

@@ -179,7 +179,7 @@ def parse_args():
         default="all",
         help=(
             'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
-            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
+            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. '
             "Only applicable when `--with_tracking` is passed."
         ),
     )
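For the first hunk above, the user-visible effect is easy to check in isolation. A hypothetical reduced parser (only this one flag; the other parameters of the real add_argument call are omitted) showing how the jammed text reached users before the fix — argparse re-wraps help text but cannot restore the missing space:

import argparse

parser = argparse.ArgumentParser(prog="run_image_classification_no_trainer.py")
parser.add_argument(
    "--trust_remote_code",
    default=False,
    help=(
        # The two literals this hunk touches, as they read before the fix:
        "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
        "execute code present on the Hub on your local machine."
    ),
)
print(parser.format_help())  # help shows the glued words "willexecute" before the fix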
examples/pytorch/image-pretraining/run_mim.py

@@ -174,7 +174,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },
examples/pytorch/image-pretraining/run_mim_no_trainer.py

@@ -208,7 +208,7 @@ def parse_args():
         default=False,
         help=(
             "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-            "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
             "execute code present on the Hub on your local machine."
         ),
     )

@@ -246,7 +246,7 @@ def parse_args():
         default="all",
         help=(
             'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
-            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
+            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. '
             "Only applicable when `--with_tracking` is passed."
         ),
     )
examples/pytorch/language-modeling/run_clm.py

@@ -132,7 +132,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -151,7 +151,7 @@ class ModelArguments:
         default=False,
         metadata={
             "help": (
-                "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."
+                "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. "
                 "set True will benefit LLM loading time and RAM consumption."
             )
         },

@@ -424,7 +424,7 @@ def main():
         tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

@@ -506,7 +506,7 @@ def main():
     else:
         if data_args.block_size > tokenizer.model_max_length:
             logger.warning(
-                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
+                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model "
                 f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
             )
         block_size = min(data_args.block_size, tokenizer.model_max_length)
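The same pitfall applies to f-strings, as in the block_size warning just above: adjacent f-string literals are concatenated exactly like plain ones. A minimal sketch with illustrative values standing in for data_args.block_size and tokenizer.model_max_length:

block_size = 2048
model_max_length = 1024

# The two f-string literals mirror the hunk above.
before = (
    f"The block_size passed ({block_size}) is larger than the maximum length for the model"
    f"({model_max_length}). Using block_size={model_max_length}."
)
after = (
    f"The block_size passed ({block_size}) is larger than the maximum length for the model "
    f"({model_max_length}). Using block_size={model_max_length}."
)
assert "model(1024)" in before   # words glued to the parenthesis
assert "model (1024)" in after   # readable after the fix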
examples/pytorch/language-modeling/run_clm_no_trainer.py

@@ -199,7 +199,7 @@ def parse_args():
         default=False,
         help=(
             "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-            "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
             "execute code present on the Hub on your local machine."
         ),
     )

@@ -226,7 +226,7 @@ def parse_args():
         default="all",
         help=(
             'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
-            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
+            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. '
             "Only applicable when `--with_tracking` is passed."
         ),
     )

@@ -234,7 +234,7 @@ def parse_args():
         "--low_cpu_mem_usage",
         action="store_true",
         help=(
-            "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."
+            "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. "
             "If passed, LLM loading time and RAM consumption will be benefited."
         ),
     )

@@ -398,7 +398,7 @@ def main():
         )
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

@@ -449,7 +449,7 @@ def main():
     else:
         if args.block_size > tokenizer.model_max_length:
             logger.warning(
-                f"The block_size passed ({args.block_size}) is larger than the maximum length for the model"
+                f"The block_size passed ({args.block_size}) is larger than the maximum length for the model "
                 f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
             )
         block_size = min(args.block_size, tokenizer.model_max_length)
examples/pytorch/language-modeling/run_mlm.py

@@ -128,7 +128,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -137,7 +137,7 @@ class ModelArguments:
         default=False,
         metadata={
             "help": (
-                "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."
+                "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. "
                 "set True will benefit LLM loading time and RAM consumption."
             )
         },

@@ -417,7 +417,7 @@ def main():
         tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

@@ -462,7 +462,7 @@ def main():
     else:
         if data_args.max_seq_length > tokenizer.model_max_length:
             logger.warning(
-                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
+                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
                 f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
             )
         max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
examples/pytorch/language-modeling/run_mlm_no_trainer.py

@@ -206,7 +206,7 @@ def parse_args():
         default=False,
         help=(
             "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-            "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
             "execute code present on the Hub on your local machine."
         ),
     )

@@ -233,7 +233,7 @@ def parse_args():
         default="all",
         help=(
             'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
-            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
+            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. '
             "Only applicable when `--with_tracking` is passed."
         ),
     )

@@ -241,7 +241,7 @@ def parse_args():
         "--low_cpu_mem_usage",
         action="store_true",
         help=(
-            "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."
+            "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. "
             "If passed, LLM loading time and RAM consumption will be benefited."
         ),
     )

@@ -395,7 +395,7 @@ def main():
         )
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

@@ -434,7 +434,7 @@ def main():
     else:
         if args.max_seq_length > tokenizer.model_max_length:
             logger.warning(
-                f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
+                f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
                 f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
             )
         max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
examples/pytorch/language-modeling/run_plm.py

@@ -115,7 +115,7 @@ class ModelArguments:
         default=False,
         metadata={
             "help": (
-                "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."
+                "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. "
                 "set True will benefit LLM loading time and RAM consumption."
             )
         },

@@ -385,7 +385,7 @@ def main():
         tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

@@ -419,7 +419,7 @@ def main():
     if data_args.max_seq_length > tokenizer.model_max_length:
         logger.warning(
-            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
+            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
     max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
examples/pytorch/multiple-choice/run_swag.py

@@ -100,7 +100,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -375,7 +375,7 @@ def main():
     else:
         if data_args.max_seq_length > tokenizer.model_max_length:
             logger.warning(
-                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
+                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
                 f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
             )
         max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
examples/pytorch/multiple-choice/run_swag_no_trainer.py

@@ -188,7 +188,7 @@ def parse_args():
         default=False,
         help=(
             "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-            "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
             "execute code present on the Hub on your local machine."
         ),
     )

@@ -215,7 +215,7 @@ def parse_args():
         default="all",
         help=(
             'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
-            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
+            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. '
             "Only applicable when `--with_tracking` is passed."
         ),
     )

@@ -401,7 +401,7 @@ def main():
         )
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )
examples/pytorch/question-answering/run_qa.py

@@ -100,7 +100,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -391,7 +391,7 @@ def main():
     if data_args.max_seq_length > tokenizer.model_max_length:
         logger.warning(
-            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
+            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
     max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
examples/pytorch/question-answering/run_qa_beam_search.py

@@ -367,7 +367,7 @@ def main():
     if data_args.max_seq_length > tokenizer.model_max_length:
         logger.warning(
-            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
+            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
     max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py

@@ -395,7 +395,7 @@ def main():
     if args.max_seq_length > tokenizer.model_max_length:
         logger.warning(
-            f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
+            f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
examples/pytorch/question-answering/run_qa_no_trainer.py

@@ -279,7 +279,7 @@ def parse_args():
         default=False,
         help=(
             "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-            "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
             "execute code present on the Hub on your local machine."
         ),
     )

@@ -306,7 +306,7 @@ def parse_args():
         default="all",
         help=(
             'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
-            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
+            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. '
             "Only applicable when `--with_tracking` is passed."
         ),
     )

@@ -442,7 +442,7 @@ def main():
         )
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

@@ -471,7 +471,7 @@ def main():
     if args.max_seq_length > tokenizer.model_max_length:
         logger.warning(
-            f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
+            f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
examples/pytorch/question-answering/run_seq2seq_qa.py

@@ -101,7 +101,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -171,7 +171,7 @@ class DataTrainingArguments:
         metadata={
             "help": (
                 "The maximum total sequence length for validation target text after tokenization. Sequences longer "
-                "than this will be truncated, sequences shorter will be padded. Will default to `max_answer_length`."
+                "than this will be truncated, sequences shorter will be padded. Will default to `max_answer_length`. "
                 "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                 "during ``evaluate`` and ``predict``."
             )

@@ -465,13 +465,13 @@ def main():
     if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
         logger.warning(
-            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
+            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for "
             f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
         )
     if data_args.max_seq_length > tokenizer.model_max_length:
         logger.warning(
-            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
+            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
     max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
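The label_smoothing hunk above mixes a plain literal with an f-string; implicit concatenation joins them the same way, so the fix is still a trailing space on the first piece. A minimal sketch (DummyModel is a hypothetical stand-in for the actual model object in the script):

class DummyModel:  # stand-in for the real model class
    pass

model = DummyModel()
warning = (
    # Post-fix literals from the hunk above:
    "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for "
    f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
assert "defined for `DummyModel`" in warning  # rendered as "for`DummyModel`" before the fix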
examples/pytorch/semantic-segmentation/run_semantic_segmentation.py

@@ -262,7 +262,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },
examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py

@@ -279,7 +279,7 @@ def parse_args():
         default=False,
         help=(
             "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-            "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
             "execute code present on the Hub on your local machine."
         ),
     )

@@ -307,7 +307,7 @@ def parse_args():
         default="all",
         help=(
             'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
-            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
+            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. '
             "Only applicable when `--with_tracking` is passed."
         ),
     )
examples/pytorch/speech-recognition/run_speech_recognition_ctc.py

@@ -104,8 +104,8 @@ class ModelArguments:
         default=0.05,
         metadata={
             "help": (
-                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
-                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
+                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
+                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                 "vectors will be masked along the time axis."
             )
         },

@@ -249,7 +249,7 @@ class DataTrainingArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -430,7 +430,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
         f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py

@@ -90,8 +90,8 @@ class ModelArguments:
         default=0.05,
         metadata={
             "help": (
-                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
-                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
+                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
+                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                 "vectors will be masked along the time axis."
             )
         },

@@ -252,7 +252,7 @@ class DataTrainingArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -426,7 +426,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
         f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py

@@ -106,7 +106,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -322,7 +322,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
         f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
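A commit like this one, touching 154 files, is easier to produce and keep fixed mechanically. Linters such as flake8-implicit-str-concat (or Ruff's ISC rules) flag implicit string concatenation in general; the sketch below is a hypothetical, narrower check that only reports junctions with no whitespace on either side, the exact bug class fixed here. It uses the standard tokenize module and, for f-strings, assumes a tokenizer that still emits them as STRING tokens (true before Python 3.12).

import io
import tokenize


def jammed_string_pairs(source: str):
    """Yield (line, left_tail, right_head) for adjacent string literals whose
    junction has no whitespace on either side.

    Heuristic: intentional junctions such as "block_size=" f"{n}" are also
    reported, so treat hits as candidates for review, not certain bugs.
    """

    def inner(tok_text: str) -> str:
        # Strip any literal prefix (f/r/b/u) and the surrounding quotes.
        body = tok_text.lstrip("fFrRbBuU")
        for quote in ('"""', "'''", '"', "'"):
            if body.startswith(quote) and body.endswith(quote) and len(body) >= 2 * len(quote):
                return body[len(quote):-len(quote)]
        return body

    prev = None
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.STRING:
            if prev is not None:
                left, right = inner(prev.string), inner(tok.string)
                if left and right and not left[-1].isspace() and not right[0].isspace():
                    yield tok.start[0], left[-20:], right[:20]
            prev = tok
        elif tok.type not in (tokenize.NL, tokenize.COMMENT):
            # Any other token breaks string adjacency.
            prev = None


sample = '''
message = (
    "You are instantiating a new tokenizer from scratch. This is not supported by this script."
    "You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
'''
for line, left, right in jammed_string_pairs(sample):
    print(f"line {line}: {left!r} + {right!r}")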