chenpangpang / transformers

Commit f4bf0dea (unverified)
Authored Jan 27, 2021 by jncasey, committed via GitHub on Jan 27, 2021
Parent: f2fabedb

Fix auto-resume training from checkpoint (#9822)

* Fix auto-resume training from checkpoint
* style fixes
Showing 1 changed file with 7 additions and 3 deletions.

src/transformers/trainer_utils.py (+7, -3)
@@ -77,15 +77,19 @@ class TrainOutput(NamedTuple):
 PREFIX_CHECKPOINT_DIR = "checkpoint"
-_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d)+$")
+_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")


 def get_last_checkpoint(folder):
     content = os.listdir(folder)
-    checkpoints = [path for path in content if _re_checkpoint.search(path) is not None and os.path.isdir(path)]
+    checkpoints = [
+        path
+        for path in content
+        if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
+    ]
     if len(checkpoints) == 0:
         return
-    return max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))
+    return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))


 class EvaluationStrategy(ExplicitEnum):
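The one-character regex change is the core of the fix: in the old pattern the `+` sits outside the capture group, so `(\d)+` re-binds the group on every repetition and `groups()[0]` holds only the last digit of the step number. `max(checkpoints, key=...)` then compares single digits instead of global steps. A minimal standalone check of the two patterns taken from the diff:

import re

PREFIX_CHECKPOINT_DIR = "checkpoint"

# Old pattern: repetition outside the group, so only the last digit survives.
old_re = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d)+$")
# New pattern: repetition inside the group captures the whole step number.
new_re = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")

print(old_re.search("checkpoint-1500").groups()[0])  # "0"  (last digit only)
print(new_re.search("checkpoint-1500").groups()[0])  # "1500"

# Under the old pattern, "checkpoint-19" (key 9) would outrank
# "checkpoint-100" (key 0), so auto-resume could pick a stale checkpoint.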
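The other two hunks make the helper correct when `folder` is not the current working directory: `os.path.isdir` now tests `os.path.join(folder, path)` instead of the bare entry name, and the winning checkpoint is joined onto `folder` so the caller gets back a path it can actually load from. A small hypothetical demo of the patched helper (the temp directory and step numbers are made up for illustration):

import os
import re
import tempfile

PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")


def get_last_checkpoint(folder):
    # Patched version from this commit: join before the isdir test and
    # return a full path rather than a bare directory name.
    content = os.listdir(folder)
    checkpoints = [
        path
        for path in content
        if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
    ]
    if len(checkpoints) == 0:
        return
    return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))


with tempfile.TemporaryDirectory() as output_dir:
    for step in (500, 1500):
        os.makedirs(os.path.join(output_dir, f"checkpoint-{step}"))
    # Works even though output_dir != os.getcwd(); the pre-fix isdir(path)
    # test would have filtered both entries out and returned None here.
    print(get_last_checkpoint(output_dir))  # .../checkpoint-1500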