chenpangpang/transformers · commit 25e6e941

Unify warning styles for better readability (#27184)

Unverified commit, authored Nov 01, 2023 by Dong-geon Lee; committed by GitHub Oct 31, 2023
Parent: 50378cbf
Changes: 22 files in total. This page shows 20 changed files, with 20 additions and 20 deletions (+20 / -20); the remaining changed files are listed on the second page.
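What the unified style fixes: each of these scripts builds its startup summary from two concatenated f-strings, and the first literal previously ended without a separator, so the rendered message ran the fields together ("n_gpu: 1distributed training: ..."). The commit appends ", " to the first literal in all 20 files. Below is a minimal sketch of the before/after behavior; "args" is a stand-in for transformers.TrainingArguments and the distributed-training flag is hard-coded, both assumptions for illustration only.

# Minimal sketch of the separator bug the unified style fixes (Python).
# "args" is a stand-in for transformers.TrainingArguments -- an assumption
# for illustration; the f-strings mirror the ones changed in this commit.
from types import SimpleNamespace

args = SimpleNamespace(local_rank=0, device="cuda:0", n_gpu=1, fp16=False)
is_distributed = False  # stand-in for parallel_mode.value == 'distributed'

# Before: the first literal ends without a separator.
before = (
    f"Process rank: {args.local_rank}, device: {args.device}, n_gpu: {args.n_gpu}"
    + f"distributed training: {is_distributed}, 16-bits training: {args.fp16}"
)
# After: the trailing ", " keeps n_gpu and the next field apart.
after = (
    f"Process rank: {args.local_rank}, device: {args.device}, n_gpu: {args.n_gpu}, "
    + f"distributed training: {is_distributed}, 16-bits training: {args.fp16}"
)

print(before)  # Process rank: 0, device: cuda:0, n_gpu: 1distributed training: False, 16-bits training: False
print(after)   # Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False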
Files changed on this page (each +1 / -1):

examples/pytorch/audio-classification/run_audio_classification.py
examples/pytorch/contrastive-image-text/run_clip.py
examples/pytorch/image-classification/run_image_classification.py
examples/pytorch/image-pretraining/run_mae.py
examples/pytorch/image-pretraining/run_mim.py
examples/pytorch/language-modeling/run_clm.py
examples/pytorch/language-modeling/run_mlm.py
examples/pytorch/language-modeling/run_plm.py
examples/pytorch/multiple-choice/run_swag.py
examples/pytorch/question-answering/run_qa.py
examples/pytorch/question-answering/run_qa_beam_search.py
examples/pytorch/question-answering/run_seq2seq_qa.py
examples/pytorch/semantic-segmentation/run_semantic_segmentation.py
examples/pytorch/speech-recognition/run_speech_recognition_ctc.py
examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py
examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py
examples/pytorch/summarization/run_summarization.py
examples/pytorch/text-classification/run_classification.py
examples/pytorch/text-classification/run_glue.py
examples/pytorch/text-classification/run_xnli.py
examples/pytorch/audio-classification/run_audio_classification.py

@@ -246,7 +246,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/contrastive-image-text/run_clip.py

@@ -284,7 +284,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/image-classification/run_image_classification.py

@@ -226,7 +226,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/image-pretraining/run_mae.py

@@ -214,7 +214,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/image-pretraining/run_mim.py

@@ -288,7 +288,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/language-modeling/run_clm.py

@@ -288,7 +288,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/language-modeling/run_mlm.py

@@ -288,7 +288,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
examples/pytorch/language-modeling/run_plm.py

@@ -269,7 +269,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/multiple-choice/run_swag.py

@@ -275,7 +275,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/question-answering/run_qa.py

@@ -277,7 +277,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/question-answering/run_qa_beam_search.py

@@ -266,7 +266,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/question-answering/run_seq2seq_qa.py

@@ -323,7 +323,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/semantic-segmentation/run_semantic_segmentation.py

@@ -314,7 +314,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/speech-recognition/run_speech_recognition_ctc.py

@@ -433,7 +433,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
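Note: this script and the two speech-recognition scripts that follow join the two literals by simple adjacency rather than with "+". Python concatenates adjacent string literals at parse time, so the missing ", " produced exactly the same run-together message, and the same trailing-comma fix applies. A two-line sketch:

# Adjacent (f-)string literals are concatenated at parse time,
# exactly as if they were joined with "+":
msg = f"n_gpu: {1}, " f"distributed training: {False}"
assert msg == "n_gpu: 1, distributed training: False"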
examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py

@@ -429,7 +429,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py

@@ -325,7 +325,7 @@ def main():
     # Log on each process the small summary:
    logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/summarization/run_summarization.py

@@ -362,7 +362,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/text-classification/run_classification.py

@@ -318,7 +318,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/text-classification/run_glue.py

@@ -266,7 +266,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")
examples/pytorch/text-classification/run_xnli.py

@@ -225,7 +225,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
     )
     logger.info(f"Training/evaluation parameters {training_args}")