chenpangpang / transformers · Commit c9035e45 (Unverified)

fix: The 'warn' method is deprecated (#11105)

* The 'warn' method is deprecated
* fix test

Authored Apr 07, 2021 by Stas Bekman; committed via GitHub on Apr 07, 2021.
Parent commit: 247bed38
Changes: 52 · Showing 20 changed files with 27 additions and 27 deletions (+27, -27)
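Background for the change: Python's standard-library `logging.Logger.warn` is a deprecated alias for `Logger.warning`, and calling it emits the DeprecationWarning quoted in the commit title ("The 'warn' method is deprecated, use 'warning' instead"). The snippet below is a minimal stdlib sketch of the one-token substitution applied throughout this commit; the logger name and message are illustrative only, not taken from the repository.

    import logging

    logger = logging.getLogger(__name__)

    # Deprecated spelling: emits "DeprecationWarning: The 'warn' method is
    # deprecated, use 'warning' instead" (visible e.g. with `python -W default`).
    logger.warn("block_size is larger than the model maximum length")

    # Preferred spelling: identical behavior, no deprecation warning.
    logger.warning("block_size is larger than the model maximum length")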
examples/language-modeling/run_clm.py                               +2 -2
examples/language-modeling/run_clm_no_trainer.py                    +2 -2
examples/language-modeling/run_mlm.py                               +2 -2
examples/language-modeling/run_mlm_no_trainer.py                    +2 -2
examples/language-modeling/run_plm.py                               +1 -1
examples/legacy/question-answering/run_squad.py                     +1 -1
examples/legacy/seq2seq/seq2seq_trainer.py                          +2 -2
examples/multiple-choice/run_swag.py                                +2 -2
examples/question-answering/run_qa.py                               +1 -1
examples/question-answering/run_qa_beam_search.py                   +1 -1
examples/question-answering/run_qa_beam_search_no_trainer.py        +1 -1
examples/question-answering/run_qa_no_trainer.py                    +1 -1
examples/question-answering/run_tf_squad.py                         +1 -1
examples/research_projects/movement-pruning/masked_run_squad.py     +1 -1
examples/seq2seq/run_summarization.py                               +1 -1
examples/seq2seq/run_translation.py                                 +1 -1
examples/text-classification/run_glue.py                            +2 -2
examples/text-classification/run_glue_no_trainer.py                 +1 -1
src/transformers/configuration_utils.py                             +1 -1
src/transformers/data/datasets/squad.py                             +1 -1
examples/language-modeling/run_clm.py

@@ -330,14 +330,14 @@ def main():
     if data_args.block_size is None:
         block_size = tokenizer.model_max_length
         if block_size > 1024:
-            logger.warn(
+            logger.warning(
                 f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                 "Picking 1024 instead. You can change that default value by passing --block_size xxx."
             )
             block_size = 1024
     else:
         if data_args.block_size > tokenizer.model_max_length:
-            logger.warn(
+            logger.warning(
                 f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
                 f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
             )
examples/language-modeling/run_clm_no_trainer.py

@@ -305,14 +305,14 @@ def main():
     if args.block_size is None:
         block_size = tokenizer.model_max_length
         if block_size > 1024:
-            logger.warn(
+            logger.warning(
                 f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                 "Picking 1024 instead. You can change that default value by passing --block_size xxx."
             )
             block_size = 1024
     else:
         if args.block_size > tokenizer.model_max_length:
-            logger.warn(
+            logger.warning(
                 f"The block_size passed ({args.block_size}) is larger than the maximum length for the model"
                 f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
             )
examples/language-modeling/run_mlm.py

@@ -324,14 +324,14 @@ def main():
     if data_args.max_seq_length is None:
         max_seq_length = tokenizer.model_max_length
         if max_seq_length > 1024:
-            logger.warn(
+            logger.warning(
                 f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                 "Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
             )
             max_seq_length = 1024
     else:
         if data_args.max_seq_length > tokenizer.model_max_length:
-            logger.warn(
+            logger.warning(
                 f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                 f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
             )
examples/language-modeling/run_mlm_no_trainer.py

@@ -308,14 +308,14 @@ def main():
     if args.max_seq_length is None:
         max_seq_length = tokenizer.model_max_length
         if max_seq_length > 1024:
-            logger.warn(
+            logger.warning(
                 f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                 "Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
             )
             max_seq_length = 1024
     else:
         if args.max_seq_length > tokenizer.model_max_length:
-            logger.warn(
+            logger.warning(
                 f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
                 f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
             )
examples/language-modeling/run_plm.py

@@ -319,7 +319,7 @@ def main():
     text_column_name = "text" if "text" in column_names else column_names[0]

     if data_args.max_seq_length > tokenizer.model_max_length:
-        logger.warn(
+        logger.warning(
             f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
examples/legacy/question-answering/run_squad.py

@@ -436,7 +436,7 @@ def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=Fal
                 raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")

             if args.version_2_with_negative:
-                logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.")
+                logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.")

             tfds_examples = tfds.load("squad")
             examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
examples/legacy/seq2seq/seq2seq_trainer.py

@@ -73,7 +73,7 @@ class Seq2SeqTrainer(Trainer):
         ), "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss calculation or doing label smoothing."

         if self.config.pad_token_id is None and self.config.eos_token_id is not None:
-            logger.warn(
+            logger.warning(
                 f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for padding.."
             )

@@ -127,7 +127,7 @@ class Seq2SeqTrainer(Trainer):
         if self.lr_scheduler is None:
             self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
         else:  # ignoring --lr_scheduler
-            logger.warn("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
+            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

     def _get_lr_scheduler(self, num_training_steps):
         schedule_func = arg_to_scheduler[self.args.lr_scheduler]
examples/multiple-choice/run_swag.py

@@ -310,14 +310,14 @@ def main():
     if data_args.max_seq_length is None:
         max_seq_length = tokenizer.model_max_length
         if max_seq_length > 1024:
-            logger.warn(
+            logger.warning(
                 f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                 "Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
             )
             max_seq_length = 1024
     else:
         if data_args.max_seq_length > tokenizer.model_max_length:
-            logger.warn(
+            logger.warning(
                 f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                 f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
             )
examples/question-answering/run_qa.py

@@ -324,7 +324,7 @@ def main():
     pad_on_right = tokenizer.padding_side == "right"

     if data_args.max_seq_length > tokenizer.model_max_length:
-        logger.warn(
+        logger.warning(
             f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
examples/question-answering/run_qa_beam_search.py

@@ -313,7 +313,7 @@ def main():
     pad_on_right = tokenizer.padding_side == "right"

     if data_args.max_seq_length > tokenizer.model_max_length:
-        logger.warn(
+        logger.warning(
             f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
examples/question-answering/run_qa_beam_search_no_trainer.py

@@ -291,7 +291,7 @@ def main():
     pad_on_right = tokenizer.padding_side == "right"

     if args.max_seq_length > tokenizer.model_max_length:
-        logger.warn(
+        logger.warning(
             f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
examples/question-answering/run_qa_no_trainer.py

@@ -343,7 +343,7 @@ def main():
     pad_on_right = tokenizer.padding_side == "right"

     if args.max_seq_length > tokenizer.model_max_length:
-        logger.warn(
+        logger.warning(
             f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
examples/question-answering/run_tf_squad.py

@@ -181,7 +181,7 @@ def main():
     # Get datasets
     if data_args.use_tfds:
         if data_args.version_2_with_negative:
-            logger.warn("tensorflow_datasets does not handle version 2 of SQuAD. Switch to version 1 automatically")
+            logger.warning("tensorflow_datasets does not handle version 2 of SQuAD. Switch to version 1 automatically")

         try:
             import tensorflow_datasets as tfds
examples/research_projects/movement-pruning/masked_run_squad.py

@@ -629,7 +629,7 @@ def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=Fal
                 raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")

             if args.version_2_with_negative:
-                logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.")
+                logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.")

             tfds_examples = tfds.load("squad")
             examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
examples/seq2seq/run_summarization.py

@@ -394,7 +394,7 @@ def main():
     padding = "max_length" if data_args.pad_to_max_length else False

     if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
-        logger.warn(
+        logger.warning(
             "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
             f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
         )
examples/seq2seq/run_translation.py

@@ -367,7 +367,7 @@ def main():
     padding = "max_length" if data_args.pad_to_max_length else False

     if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
-        logger.warn(
+        logger.warning(
             "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
             f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
         )
examples/text-classification/run_glue.py

@@ -351,7 +351,7 @@ def main():
         if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
             label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
         else:
-            logger.warn(
+            logger.warning(
                 "Your model seems to have been trained with labels, but they don't match the dataset: ",
                 f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                 "\nIgnoring the model labels as a result.",

@@ -360,7 +360,7 @@ def main():
         label_to_id = {v: i for i, v in enumerate(label_list)}

     if data_args.max_seq_length > tokenizer.model_max_length:
-        logger.warn(
+        logger.warning(
             f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
examples/text-classification/run_glue_no_trainer.py

@@ -274,7 +274,7 @@ def main():
             )
             label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
         else:
-            logger.warn(
+            logger.warning(
                 "Your model seems to have been trained with labels, but they don't match the dataset: ",
                 f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                 "\nIgnoring the model labels as a result.",
src/transformers/configuration_utils.py

@@ -262,7 +262,7 @@ class PretrainedConfig(object):
         # TPU arguments
         if kwargs.pop("xla_device", None) is not None:
-            logger.warn(
+            logger.warning(
                 "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
                 "safely remove it from your `config.json` file."
             )
src/transformers/data/datasets/squad.py

@@ -152,7 +152,7 @@ class SquadDataset(Dataset):
                 )
                 if self.dataset is None or self.examples is None:
-                    logger.warn(
+                    logger.warning(
                         f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in future run"
                     )
                 else: