chenpangpang / transformers · Commits

Commit c9035e45 (unverified)
Authored Apr 07, 2021 by Stas Bekman; committed by GitHub on Apr 07, 2021
Parent: 247bed38

fix: The 'warn' method is deprecated (#11105)

* The 'warn' method is deprecated
* fix test
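The rename follows the Python standard library: `logging.Logger.warn` is only a deprecated alias for `Logger.warning`, and calling it emits the DeprecationWarning quoted in the commit title. A minimal standalone sketch (not part of this commit) of the behaviour being removed:

import logging
import warnings

logger = logging.getLogger("transformers.demo")
logging.basicConfig(level=logging.WARNING)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.warn("old spelling")      # deprecated alias; still logs, but also warns
    logger.warning("new spelling")   # preferred spelling; no DeprecationWarning

# On CPython 3.x the recorded warning carries the message quoted in the commit
# title, e.g. "The 'warn' method is deprecated, use 'warning' instead".
print([str(w.message) for w in caught])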
Changes: 52
Showing 12 changed files with 17 additions and 17 deletions (+17 / -17)
src/transformers/models/roberta/modeling_roberta.py                                           +1 -1
src/transformers/models/speech_to_text/modeling_speech_to_text.py                             +1 -1
src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py   +1 -1
src/transformers/models/xlm/modeling_tf_xlm.py                                                +1 -1
src/transformers/models/xlm/modeling_xlm.py                                                   +1 -1
src/transformers/pipelines/zero_shot_classification.py                                        +1 -1
src/transformers/trainer_callback.py                                                          +2 -2
src/transformers/trainer_pt_utils.py                                                          +2 -2
templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py  +2 -2
tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py                                 +2 -2
tests/test_logging.py                                                                         +2 -2
tests/test_trainer_callback.py                                                                +1 -1
src/transformers/models/roberta/modeling_roberta.py

@@ -484,7 +484,7 @@ class RobertaEncoder(nn.Module):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
src/transformers/models/speech_to_text/modeling_speech_to_text.py

@@ -1015,7 +1015,7 @@ class Speech2TextDecoder(Speech2TextPreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache = True` is incompatible with `config.gradient_checkpointing = True`. Setting `use_cache = False`..."
                     )
                     use_cache = False
src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py

@@ -111,7 +111,7 @@ def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
         if not is_used:
             unused_weights.append(name)
-    logger.warn(f"Unused weights: {unused_weights}")
+    logger.warning(f"Unused weights: {unused_weights}")


 def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
src/transformers/models/xlm/modeling_tf_xlm.py

@@ -1140,7 +1140,7 @@ class TFXLMForMultipleChoice(TFXLMPreTrainedModel, TFMultipleChoiceLoss):
             )
         if inputs["lengths"] is not None:
-            logger.warn(
+            logger.warning(
                 "The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the "
                 "attention mask instead.",
             )
src/transformers/models/xlm/modeling_xlm.py

@@ -1232,7 +1232,7 @@ class XLMForMultipleChoice(XLMPreTrainedModel):
             )
         if lengths is not None:
-            logger.warn(
+            logger.warning(
                 "The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the "
                 "attention mask instead."
             )
src/transformers/pipelines/zero_shot_classification.py

@@ -142,7 +142,7 @@ class ZeroShotClassificationPipeline(Pipeline):
         """
         if "multi_class" in kwargs and kwargs["multi_class"] is not None:
             multi_label = kwargs.pop("multi_class")
-            logger.warn(
+            logger.warning(
                 "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                 "`multi_class` will be removed in a future version of Transformers."
             )
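The surrounding context (unchanged by this commit) shows the deprecate-and-rename pattern the pipeline uses for `multi_class`. A generic sketch of that pattern follows; the function name is illustrative only, not the real pipeline entry point:

import logging

logger = logging.getLogger(__name__)

def classify(sequences, multi_label=False, **kwargs):
    # Hypothetical wrapper used only to illustrate the pattern above:
    # pop the old kwarg, forward its value to the new name, and warn once.
    if "multi_class" in kwargs and kwargs["multi_class"] is not None:
        multi_label = kwargs.pop("multi_class")
        logger.warning(
            "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
            "`multi_class` will be removed in a future version of Transformers."
        )
    return multi_label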
src/transformers/trainer_callback.py

@@ -289,7 +289,7 @@ class CallbackHandler(TrainerCallback):
         self.eval_dataloader = None

         if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks):
-            logger.warn(
+            logger.warning(
                 "The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n"
                 + "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of"
                 + "callbacks is\n:"

@@ -300,7 +300,7 @@ class CallbackHandler(TrainerCallback):
         cb = callback() if isinstance(callback, type) else callback
         cb_class = callback if isinstance(callback, type) else callback.__class__
         if cb_class in [c.__class__ for c in self.callbacks]:
-            logger.warn(
+            logger.warning(
                 f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current"
                 + "list of callbacks is\n:"
                 + self.callback_list
src/transformers/trainer_pt_utils.py

@@ -391,7 +391,7 @@ class DistributedTensorGatherer:
         if self._storage is None:
             return
         if self._offsets[0] != self.process_length:
-            logger.warn("Not all data has been set. Are you sure you passed all values?")
+            logger.warning("Not all data has been set. Are you sure you passed all values?")
         return nested_truncate(self._storage, self.num_samples)

@@ -589,7 +589,7 @@ def _get_learning_rate(self):
         last_lr = self.lr_scheduler.get_last_lr()[0]
     except AssertionError as e:
         if "need to call step" in str(e):
-            logger.warn("tried to get lr value before scheduler/optimizer started stepping, returning lr=0")
+            logger.warning("tried to get lr value before scheduler/optimizer started stepping, returning lr=0")
             last_lr = 0
         else:
             raise
templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py

@@ -531,7 +531,7 @@ class {{cookiecutter.camelcase_modelname}}Encoder(nn.Module):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )

@@ -2512,7 +2512,7 @@ class {{cookiecutter.camelcase_modelname}}Decoder({{cookiecutter.camelcase_model
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn("`use_cache = True` is incompatible with `config.gradient_checkpointing = True`. Setting `use_cache = False`...")
+                    logger.warning("`use_cache = True` is incompatible with `config.gradient_checkpointing = True`. Setting `use_cache = False`...")
                     use_cache = False

                 def create_custom_forward(module):
tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py

@@ -353,7 +353,7 @@ def main():
        if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
-            logger.warn(
+            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: ",
                f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                "\nIgnoring the model labels as a result.",

@@ -362,7 +362,7 @@ def main():
    label_to_id = {v: i for i, v in enumerate(label_list)}

    if data_args.max_seq_length > tokenizer.model_max_length:
-        logger.warn(
+        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
tests/test_logging.py

@@ -51,7 +51,7 @@ class HfArgumentParserTest(unittest.TestCase):
         # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
         if level_origin <= logging.WARNING:
             with CaptureLogger(logger) as cl:
-                logger.warn(msg)
+                logger.warning(msg)
             self.assertEqual(cl.out, msg + "\n")

         # this is setting the level for all of `transformers.*` loggers

@@ -59,7 +59,7 @@ class HfArgumentParserTest(unittest.TestCase):
         # should not be able to log warnings
         with CaptureLogger(logger) as cl:
-            logger.warn(msg)
+            logger.warning(msg)
         self.assertEqual(cl.out, "")

         # should be able to log warnings again
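For reference, the capture pattern this test relies on: `transformers.testing_utils.CaptureLogger` collects whatever the given logger emits into `cl.out`. A short sketch, assuming the API behaves as the test above exercises it (including the trailing newline per record); the logger name here is illustrative:

from transformers.testing_utils import CaptureLogger
from transformers.utils import logging

logger = logging.get_logger("transformers.test_demo")
logging.set_verbosity_warning()

msg = "Testing 1, 2, 3"
with CaptureLogger(logger) as cl:
    logger.warning(msg)          # the spelling this commit standardizes on
assert cl.out == msg + "\n"      # CaptureLogger appends a newline per record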
tests/test_trainer_callback.py

@@ -234,7 +234,7 @@ class TrainerCallbackTest(unittest.TestCase):
         self.assertEqual(events, self.get_expected_events(trainer))

         # warning should be emitted for duplicated callbacks
-        with unittest.mock.patch("transformers.trainer_callback.logger.warn") as warn_mock:
+        with unittest.mock.patch("transformers.trainer_callback.logger.warning") as warn_mock:
             trainer = self.get_trainer(
                 callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
             )
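The patch target string has to track the rename, because `unittest.mock.patch` replaces only the attribute named in that string; a test still patching `logger.warn` would never intercept calls made through `logger.warning`. A minimal standalone sketch (plain stdlib, not the Trainer test itself):

import logging
from unittest import mock

logger = logging.getLogger("transformers.trainer_callback")

# Patching the new name intercepts the duplicate-callback warning ...
with mock.patch.object(logger, "warning") as warn_mock:
    logger.warning("You are adding a duplicated callback.")
warn_mock.assert_called_once()

# ... while patching the old name leaves logger.warning() untouched.
with mock.patch.object(logger, "warn") as stale_mock:
    logger.warning("This call is not seen by the mock.")
assert not stale_mock.called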