chenpangpang / transformers · Commits

Unverified commit 57420b10
Add missing whitespace to multiline strings (#13916)

Authored Oct 07, 2021 by Alex Hedges; committed via GitHub on Oct 07, 2021.
Parent: 319beb64
Changes: 56 files in the full commit (this page lists 16 of them).
Showing 16 changed files with 34 additions and 34 deletions (+34 -34).
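The pattern being fixed is Python's implicit concatenation of adjacent string literals: when a long message is split across source lines, the pieces are joined with no separator, so any piece that does not end in a space runs straight into the next word. A minimal illustration of the bug and the fix (not code from the commit, just the pattern it corrects):

    # Adjacent string literals concatenate with no separator.
    broken = (
        "Passing a `model_init` is incompatible with the `optimizers` argument."  # no trailing space
        "You should subclass `Trainer` instead."
    )
    fixed = (
        "Passing a `model_init` is incompatible with the `optimizers` argument. "  # space restored
        "You should subclass `Trainer` instead."
    )
    print(broken)  # ...argument.You should subclass...
    print(fixed)   # ...argument. You should subclass...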
src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py (+2 -2)
src/transformers/models/squeezebert/modeling_squeezebert.py (+1 -1)
src/transformers/models/tapas/modeling_tapas.py (+2 -2)
src/transformers/models/tapas/tokenization_tapas.py (+3 -3)
src/transformers/models/transfo_xl/tokenization_transfo_xl.py (+1 -1)
src/transformers/models/visual_bert/modeling_visual_bert.py (+3 -3)
src/transformers/models/vit/feature_extraction_vit.py (+1 -1)
src/transformers/models/wav2vec2/configuration_wav2vec2.py (+3 -3)
src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py (+2 -2)
src/transformers/models/wav2vec2/modeling_wav2vec2.py (+1 -1)
src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py (+2 -2)
src/transformers/onnx/features.py (+1 -1)
src/transformers/tokenization_utils.py (+3 -3)
src/transformers/tokenization_utils_base.py (+2 -2)
src/transformers/trainer.py (+3 -3)
src/transformers/training_args.py (+4 -4)
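Omissions like these are easy to miss in review because each literal looks fine on its own line. Below is a rough, standalone sketch (not part of the commit) of how such joins can be found mechanically with the standard tokenize module; it is a heuristic and will also flag intentional no-space joins, and it skips f-strings. Dedicated linters such as flake8-implicit-str-concat cover the same pattern more robustly.

    # check_str_concat.py -- illustrative only, not part of this commit.
    # Flags adjacent string literals whose join point has no whitespace on
    # either side, which is how run-together messages like these arise.
    import ast
    import sys
    import tokenize

    SKIP = (tokenize.NL, tokenize.NEWLINE, tokenize.COMMENT, tokenize.INDENT, tokenize.DEDENT)

    def literal_text(tok_string):
        # Best-effort: returns the string value, or None for f-strings and
        # anything else ast.literal_eval cannot evaluate.
        try:
            value = ast.literal_eval(tok_string)
        except (SyntaxError, ValueError):
            return None
        return value if isinstance(value, str) else None

    def check(path):
        with tokenize.open(path) as f:
            tokens = [t for t in tokenize.generate_tokens(f.readline) if t.type not in SKIP]
        for prev, cur in zip(tokens, tokens[1:]):
            if prev.type != tokenize.STRING or cur.type != tokenize.STRING:
                continue
            left, right = literal_text(prev.string), literal_text(cur.string)
            if left and right and not left[-1].isspace() and not right[0].isspace():
                print(f"{path}:{cur.start[0]}: possible missing space before {cur.string[:40]!r}")

    if __name__ == "__main__":
        for path in sys.argv[1:]:
            check(path)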
src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
@@ -189,12 +189,12 @@ class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
         if sampling_rate is not None:
             if sampling_rate != self.sampling_rate:
                 raise ValueError(
-                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}."
+                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. "
                     f"Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}."
                 )
         else:
             logger.warning(
-                "It is strongly recommended to pass the `sampling_rate` argument to this function."
+                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                 "Failing to do so can result in silent errors that might be hard to debug."
             )
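Implicit concatenation applies to f-strings just as it does to plain literals, which is why the hunk above only needs a space added inside the first f-string. An illustrative REPL check, with a stand-in value for self.sampling_rate:

    >>> sr = 16000
    >>> (f"was trained using a sampling rate of {sr}."
    ...  f"Please make sure that the provided `raw_speech` input was sampled with {sr}.")
    'was trained using a sampling rate of 16000.Please make sure that the provided `raw_speech` input was sampled with 16000.'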
src/transformers/models/squeezebert/modeling_squeezebert.py
@@ -300,7 +300,7 @@ class SqueezeBertEncoder(nn.Module):
         super().__init__()
         assert config.embedding_size == config.hidden_size, (
-            "If you want embedding_size != intermediate hidden_size,"
+            "If you want embedding_size != intermediate hidden_size, "
             "please insert a Conv1d layer to adjust the number of channels "
             "before the first SqueezeBertModule."
         )
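The same joining rule governs the parenthesized assert message above: the three literals collapse into one string before assert ever sees them. A small standalone check (hypothetical sizes, not the SqueezeBERT code):

    # The parenthesized literals form a single message string at compile time.
    embedding_size, hidden_size = 512, 768
    assert embedding_size == hidden_size, (
        "If you want embedding_size != intermediate hidden_size, "  # trailing space matters
        "please insert a Conv1d layer to adjust the number of channels "
        "before the first SqueezeBertModule."
    )
    # AssertionError: If you want embedding_size != intermediate hidden_size, please insert ...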
src/transformers/models/tapas/modeling_tapas.py
@@ -54,8 +54,8 @@ if is_scatter_available():
         from torch_scatter import scatter
     except OSError:
         logger.error(
-            "TAPAS models are not usable since `torch_scatter` can't be loaded."
-            "It seems you have `torch_scatter` installed with the wrong CUDA version."
+            "TAPAS models are not usable since `torch_scatter` can't be loaded. "
+            "It seems you have `torch_scatter` installed with the wrong CUDA version. "
             "Please try to reinstall it following the instructions here: https://github.com/rusty1s/pytorch_scatter."
         )
src/transformers/models/tapas/tokenization_tapas.py
@@ -712,7 +712,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
                 "transformers.PreTrainedTokenizerFast."
             )
@@ -981,7 +981,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
                 "transformers.PreTrainedTokenizerFast."
             )
@@ -1159,7 +1159,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         if max_length is not None and len(input_ids) > max_length:
             raise ValueError(
-                "Could not encode the query and table header given the maximum length. Encoding the query and table"
+                "Could not encode the query and table header given the maximum length. Encoding the query and table "
                 f"header results in a length of {len(input_ids)} which is higher than the max_length of {max_length}"
             )
src/transformers/models/transfo_xl/tokenization_transfo_xl.py
@@ -236,7 +236,7 @@ class TransfoXLTokenizer(PreTrainedTokenizer):
         except Exception as e:
             raise ValueError(
                 f"Unable to parse file {pretrained_vocab_file}. Unknown format. "
-                "If you tried to load a model saved through TransfoXLTokenizerFast,"
+                "If you tried to load a model saved through TransfoXLTokenizerFast, "
                 "please note they are not compatible."
             ) from e
src/transformers/models/visual_bert/modeling_visual_bert.py
@@ -174,7 +174,7 @@ class VisualBertEmbeddings(nn.Module):
             if visual_position_embeddings.size(1) != visual_embeds.size(1):
                 if visual_position_embeddings.size(1) < visual_embeds.size(1):
                     raise ValueError(
-                        f"Visual position embeddings length: {visual_position_embeddings.size(1)}"
+                        f"Visual position embeddings length: {visual_position_embeddings.size(1)} "
                         f"should be the same as `visual_embeds` length: {visual_embeds.size(1)}"
                     )
                 visual_position_embeddings = visual_position_embeddings[:, : visual_embeds.size(1), :]
@@ -973,7 +973,7 @@ class VisualBertForPreTraining(VisualBertPreTrainedModel):
             total_size = attention_mask.size(-1) + visual_attention_mask.size(-1)
             if labels.size(-1) != total_size:
                 raise ValueError(
-                    f"The labels provided should have same sequence length as total attention mask."
+                    f"The labels provided should have same sequence length as total attention mask. "
                     f"Found labels with sequence length {labels.size(-1)}, expected {total_size}."
                 )
@@ -986,7 +986,7 @@ class VisualBertForPreTraining(VisualBertPreTrainedModel):
             total_size = attention_mask.size(-1) + visual_attention_mask.size(-1)
             if labels.size(-1) != total_size:
                 raise ValueError(
-                    f"The labels provided should have same sequence length as total attention mask."
+                    f"The labels provided should have same sequence length as total attention mask. "
                     f"Found labels with sequence length {labels.size(-1)}, expected {total_size}."
                 )
src/transformers/models/vit/feature_extraction_vit.py
@@ -122,7 +122,7 @@ class ViTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
         if not valid_images:
             raise ValueError(
-                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
+                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
                 "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
             )
src/transformers/models/wav2vec2/configuration_wav2vec2.py
@@ -237,9 +237,9 @@ class Wav2Vec2Config(PretrainedConfig):
             or (len(self.conv_dim) != self.num_feat_extract_layers)
         ):
             raise ValueError(
-                "Configuration for convolutional layers is incorrect."
-                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
-                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
+                "Configuration for convolutional layers is incorrect. "
+                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
+                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                 f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
             )
src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py
@@ -170,12 +170,12 @@ class Wav2Vec2FeatureExtractor(SequenceFeatureExtractor):
         if sampling_rate is not None:
             if sampling_rate != self.sampling_rate:
                 raise ValueError(
-                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}."
+                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. "
                     f"Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}."
                 )
         else:
             logger.warning(
-                "It is strongly recommended to pass the ``sampling_rate`` argument to this function."
+                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                 "Failing to do so can result in silent errors that might be hard to debug."
             )
src/transformers/models/wav2vec2/modeling_wav2vec2.py
@@ -1421,7 +1421,7 @@ class Wav2Vec2ForCTC(Wav2Vec2PreTrainedModel):
             raise ValueError(
                 f"You are trying to instantiate {self.__class__} with a configuration that "
                 "does not define the vocabulary size of the language model head. Please "
-                "instantiate the model as follows: `Wav2Vec2ForCTC.from_pretrained(..., vocab_size=vocab_size)`."
+                "instantiate the model as follows: `Wav2Vec2ForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
                 "or define `vocab_size` of your model's configuration."
             )
         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py
@@ -152,7 +152,7 @@ class XLMProphetNetTokenizer(PreTrainedTokenizer):
             import sentencepiece as spm
         except ImportError:
             logger.warning(
-                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
+                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece "
                 "pip install sentencepiece"
             )
             raise
@@ -191,7 +191,7 @@ class XLMProphetNetTokenizer(PreTrainedTokenizer):
             import sentencepiece as spm
         except ImportError:
             logger.warning(
-                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
+                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece "
                 "pip install sentencepiece"
             )
             raise
src/transformers/onnx/features.py
@@ -109,7 +109,7 @@ class FeaturesManager:
         task = FeaturesManager.feature_to_task(feature)
         if task not in FeaturesManager._TASKS_TO_AUTOMODELS:
             raise KeyError(
-                f"Unknown task: {feature}."
+                f"Unknown task: {feature}. "
                 f"Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}"
             )
src/transformers/tokenization_utils.py
@@ -596,9 +596,9 @@ class PreTrainedTokenizer(PreTrainedTokenizerBase):
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
-                "transformers.PreTrainedTokenizerFast."
+                "transformers.PreTrainedTokenizerFast. "
                 "More information on available tokenizers at "
                 "https://github.com/huggingface/transformers/pull/2674"
             )
@@ -673,7 +673,7 @@ class PreTrainedTokenizer(PreTrainedTokenizerBase):
         if return_offsets_mapping:
             raise NotImplementedError(
-                "return_offset_mapping is not available when using Python tokenizers."
+                "return_offset_mapping is not available when using Python tokenizers. "
                 "To use this feature, change your tokenizer to one deriving from "
                 "transformers.PreTrainedTokenizerFast."
             )
src/transformers/tokenization_utils_base.py
@@ -3059,7 +3059,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
                     pair_ids = pair_ids[:-num_tokens_to_remove]
                 else:
                     logger.error(
-                        f"We need to remove {num_tokens_to_remove} to truncate the input"
+                        f"We need to remove {num_tokens_to_remove} to truncate the input "
                         f"but the second sequence has a length {len(pair_ids)}. "
                         f"Please select another truncation strategy than {truncation_strategy}, "
                         f"for instance 'longest_first' or 'only_first'."
@@ -3250,7 +3250,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         """
         assert already_has_special_tokens and token_ids_1 is None, (
             "You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
-            "Please use a slow (full python) tokenizer to activate this argument."
+            "Please use a slow (full python) tokenizer to activate this argument. "
             "Or set `return_special_tokens_mask=True` when calling the encoding method "
             "to get the special tokens mask in any tokenizer. "
         )
src/transformers/trainer.py
@@ -385,7 +385,7 @@ class Trainer:
         self.optimizer, self.lr_scheduler = optimizers
         if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
             raise RuntimeError(
-                "Passing a `model_init` is incompatible with providing the `optimizers` argument."
+                "Passing a `model_init` is incompatible with providing the `optimizers` argument. "
                 "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
             )
         default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
@@ -1737,8 +1737,8 @@ class Trainer:
         if backend is None:
             raise RuntimeError(
                 "At least one of optuna or ray should be installed. "
-                "To install optuna run `pip install optuna`."
-                "To install ray run `pip install ray[tune]`."
+                "To install optuna run `pip install optuna`. "
+                "To install ray run `pip install ray[tune]`. "
                 "To install sigopt run `pip install sigopt`."
             )
         backend = HPSearchBackend(backend)
src/transformers/training_args.py
@@ -385,7 +385,7 @@ class TrainingArguments:
         default=False,
         metadata={
             "help": (
-                "Overwrite the content of the output directory."
+                "Overwrite the content of the output directory. "
                 "Use this to continue training if output_dir points to a checkpoint directory."
             )
         },
@@ -420,7 +420,7 @@ class TrainingArguments:
     per_gpu_eval_batch_size: Optional[int] = field(
         default=None,
         metadata={
-            "help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred."
+            "help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred. "
             "Batch size per GPU/TPU core/CPU for evaluation."
         },
     )
@@ -492,7 +492,7 @@ class TrainingArguments:
         default=None,
         metadata={
             "help": (
-                "Limit the total amount of checkpoints."
+                "Limit the total amount of checkpoints. "
                 "Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
             )
         },
@@ -514,7 +514,7 @@ class TrainingArguments:
         default="O1",
         metadata={
             "help": (
-                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                 "See details at https://nvidia.github.io/apex/amp.html"
             )
         },
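In training_args.py the concatenated strings are argparse help texts rather than exception messages, so the missing space surfaces in --help output instead of a traceback. A standalone sketch of the same pattern (plain argparse here, standing in for the dataclass fields above that feed an equivalent parser):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help=(
            "Limit the total amount of checkpoints. "  # without this space: "checkpoints.Deletes"
            "Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
        ),
    )
    print(parser.format_help())  # help text now reads as one correctly spaced sentence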