Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
transformers
Commits
e9442440
Unverified
Commit
e9442440
authored
Aug 25, 2022
by
Rahul A R
Committed by
GitHub
Aug 25, 2022
Browse files
streamlining 'checkpointing_steps' parsing (#18755)
parent
fbf382c8
Changes
10
Hide whitespace changes
Inline
Side-by-side
Showing
10 changed files
with
30 additions
and
60 deletions
+30
-60
examples/pytorch/image-classification/run_image_classification_no_trainer.py
...age-classification/run_image_classification_no_trainer.py
+3
-6
examples/pytorch/language-modeling/run_clm_no_trainer.py
examples/pytorch/language-modeling/run_clm_no_trainer.py
+3
-6
examples/pytorch/language-modeling/run_mlm_no_trainer.py
examples/pytorch/language-modeling/run_mlm_no_trainer.py
+3
-6
examples/pytorch/multiple-choice/run_swag_no_trainer.py
examples/pytorch/multiple-choice/run_swag_no_trainer.py
+3
-6
examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py
...torch/question-answering/run_qa_beam_search_no_trainer.py
+3
-6
examples/pytorch/question-answering/run_qa_no_trainer.py
examples/pytorch/question-answering/run_qa_no_trainer.py
+3
-6
examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
...ntic-segmentation/run_semantic_segmentation_no_trainer.py
+3
-6
examples/pytorch/text-classification/run_glue_no_trainer.py
examples/pytorch/text-classification/run_glue_no_trainer.py
+3
-6
examples/pytorch/token-classification/run_ner_no_trainer.py
examples/pytorch/token-classification/run_ner_no_trainer.py
+3
-6
examples/pytorch/translation/run_translation_no_trainer.py
examples/pytorch/translation/run_translation_no_trainer.py
+3
-6
No files found.
examples/pytorch/image-classification/run_image_classification_no_trainer.py
View file @
e9442440
...
...
@@ -406,12 +406,9 @@ def main():
args
.
num_train_epochs
=
math
.
ceil
(
args
.
max_train_steps
/
num_update_steps_per_epoch
)
# Figure out how many steps we should save the Accelerator states
if
hasattr
(
args
.
checkpointing_steps
,
"isdigit"
):
checkpointing_steps
=
args
.
checkpointing_steps
if
args
.
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
args
.
checkpointing_steps
)
else
:
checkpointing_steps
=
None
checkpointing_steps
=
args
.
checkpointing_steps
if
checkpointing_steps
is
not
None
and
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
checkpointing_steps
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
...
...
examples/pytorch/language-modeling/run_clm_no_trainer.py
View file @
e9442440
...
...
@@ -508,12 +508,9 @@ def main():
args
.
num_train_epochs
=
math
.
ceil
(
args
.
max_train_steps
/
num_update_steps_per_epoch
)
# Figure out how many steps we should save the Accelerator states
if
hasattr
(
args
.
checkpointing_steps
,
"isdigit"
):
checkpointing_steps
=
args
.
checkpointing_steps
if
args
.
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
args
.
checkpointing_steps
)
else
:
checkpointing_steps
=
None
checkpointing_steps
=
args
.
checkpointing_steps
if
checkpointing_steps
is
not
None
and
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
checkpointing_steps
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
...
...
examples/pytorch/language-modeling/run_mlm_no_trainer.py
View file @
e9442440
...
...
@@ -552,12 +552,9 @@ def main():
args
.
num_train_epochs
=
math
.
ceil
(
args
.
max_train_steps
/
num_update_steps_per_epoch
)
# Figure out how many steps we should save the Accelerator states
if
hasattr
(
args
.
checkpointing_steps
,
"isdigit"
):
checkpointing_steps
=
args
.
checkpointing_steps
if
args
.
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
args
.
checkpointing_steps
)
else
:
checkpointing_steps
=
None
checkpointing_steps
=
args
.
checkpointing_steps
if
checkpointing_steps
is
not
None
and
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
checkpointing_steps
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
...
...
examples/pytorch/multiple-choice/run_swag_no_trainer.py
View file @
e9442440
...
...
@@ -505,12 +505,9 @@ def main():
args
.
num_train_epochs
=
math
.
ceil
(
args
.
max_train_steps
/
num_update_steps_per_epoch
)
# Figure out how many steps we should save the Accelerator states
if
hasattr
(
args
.
checkpointing_steps
,
"isdigit"
):
checkpointing_steps
=
args
.
checkpointing_steps
if
args
.
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
args
.
checkpointing_steps
)
else
:
checkpointing_steps
=
None
checkpointing_steps
=
args
.
checkpointing_steps
if
checkpointing_steps
is
not
None
and
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
checkpointing_steps
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
...
...
examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py
View file @
e9442440
...
...
@@ -764,12 +764,9 @@ def main():
args
.
num_train_epochs
=
math
.
ceil
(
args
.
max_train_steps
/
num_update_steps_per_epoch
)
# Figure out how many steps we should save the Accelerator states
if
hasattr
(
args
.
checkpointing_steps
,
"isdigit"
):
checkpointing_steps
=
args
.
checkpointing_steps
if
args
.
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
args
.
checkpointing_steps
)
else
:
checkpointing_steps
=
None
checkpointing_steps
=
args
.
checkpointing_steps
if
checkpointing_steps
is
not
None
and
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
checkpointing_steps
)
# We need to initialize the trackers we use, and also store our configuration
if
args
.
with_tracking
:
...
...
examples/pytorch/question-answering/run_qa_no_trainer.py
View file @
e9442440
...
...
@@ -779,12 +779,9 @@ def main():
args
.
num_train_epochs
=
math
.
ceil
(
args
.
max_train_steps
/
num_update_steps_per_epoch
)
# Figure out how many steps we should save the Accelerator states
if
hasattr
(
args
.
checkpointing_steps
,
"isdigit"
):
checkpointing_steps
=
args
.
checkpointing_steps
if
args
.
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
args
.
checkpointing_steps
)
else
:
checkpointing_steps
=
None
checkpointing_steps
=
args
.
checkpointing_steps
if
checkpointing_steps
is
not
None
and
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
checkpointing_steps
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
...
...
examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
View file @
e9442440
...
...
@@ -475,12 +475,9 @@ def main():
)
# Figure out how many steps we should save the Accelerator states
if
hasattr
(
args
.
checkpointing_steps
,
"isdigit"
):
checkpointing_steps
=
args
.
checkpointing_steps
if
args
.
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
args
.
checkpointing_steps
)
else
:
checkpointing_steps
=
None
checkpointing_steps
=
args
.
checkpointing_steps
if
checkpointing_steps
is
not
None
and
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
checkpointing_steps
)
# Scheduler and math around the number of training steps.
overrode_max_train_steps
=
False
...
...
examples/pytorch/text-classification/run_glue_no_trainer.py
View file @
e9442440
...
...
@@ -451,12 +451,9 @@ def main():
args
.
num_train_epochs
=
math
.
ceil
(
args
.
max_train_steps
/
num_update_steps_per_epoch
)
# Figure out how many steps we should save the Accelerator states
if
hasattr
(
args
.
checkpointing_steps
,
"isdigit"
):
checkpointing_steps
=
args
.
checkpointing_steps
if
args
.
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
args
.
checkpointing_steps
)
else
:
checkpointing_steps
=
None
checkpointing_steps
=
args
.
checkpointing_steps
if
checkpointing_steps
is
not
None
and
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
checkpointing_steps
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
...
...
examples/pytorch/token-classification/run_ner_no_trainer.py
View file @
e9442440
...
...
@@ -566,12 +566,9 @@ def main():
args
.
num_train_epochs
=
math
.
ceil
(
args
.
max_train_steps
/
num_update_steps_per_epoch
)
# Figure out how many steps we should save the Accelerator states
if
hasattr
(
args
.
checkpointing_steps
,
"isdigit"
):
checkpointing_steps
=
args
.
checkpointing_steps
if
args
.
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
args
.
checkpointing_steps
)
else
:
checkpointing_steps
=
None
checkpointing_steps
=
args
.
checkpointing_steps
if
checkpointing_steps
is
not
None
and
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
checkpointing_steps
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
...
...
examples/pytorch/translation/run_translation_no_trainer.py
View file @
e9442440
...
...
@@ -549,12 +549,9 @@ def main():
# Afterwards we recalculate our number of training epochs
args
.
num_train_epochs
=
math
.
ceil
(
args
.
max_train_steps
/
num_update_steps_per_epoch
)
# Figure out how many steps we should save the Accelerator states
if
hasattr
(
args
.
checkpointing_steps
,
"isdigit"
):
checkpointing_steps
=
args
.
checkpointing_steps
if
args
.
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
args
.
checkpointing_steps
)
else
:
checkpointing_steps
=
None
checkpointing_steps
=
args
.
checkpointing_steps
if
checkpointing_steps
is
not
None
and
checkpointing_steps
.
isdigit
():
checkpointing_steps
=
int
(
checkpointing_steps
)
# We need to initialize the trackers we use, and also store our configuration.
# We initialize the trackers only on main process because `accelerator.log`
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment