Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
fa15ed1e
Commit
fa15ed1e
authored
Jan 20, 2021
by
Soroosh Yazdani
Committed by
TF Object Detection Team
Jan 20, 2021
Browse files
Adding the option of continuous eval with yield, to allow metrics to be updated and logged.
PiperOrigin-RevId: 352851428
parent
c787baad
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
34 additions
and
6 deletions
+34
-6
research/object_detection/model_lib.py
research/object_detection/model_lib.py
+34
-6
No files found.
research/object_detection/model_lib.py
View file @
fa15ed1e
...
@@ -971,12 +971,12 @@ def _evaluate_checkpoint(estimator,
...
@@ -971,12 +971,12 @@ def _evaluate_checkpoint(estimator,
raise
e
raise
e
def
continuous_eval
(
estimator
,
def
continuous_eval
_generator
(
estimator
,
model_dir
,
model_dir
,
input_fn
,
input_fn
,
train_steps
,
train_steps
,
name
,
name
,
max_retries
=
0
):
max_retries
=
0
):
"""Perform continuous evaluation on checkpoints written to a model directory.
"""Perform continuous evaluation on checkpoints written to a model directory.
Args:
Args:
...
@@ -989,6 +989,9 @@ def continuous_eval(estimator,
...
@@ -989,6 +989,9 @@ def continuous_eval(estimator,
max_retries: Maximum number of times to retry the evaluation on encountering
max_retries: Maximum number of times to retry the evaluation on encountering
a tf.errors.InvalidArgumentError. If negative, will always retry the
a tf.errors.InvalidArgumentError. If negative, will always retry the
evaluation.
evaluation.
Yields:
Pair of current step and eval_results.
"""
"""
def
terminate_eval
():
def
terminate_eval
():
...
@@ -1011,6 +1014,7 @@ def continuous_eval(estimator,
...
@@ -1011,6 +1014,7 @@ def continuous_eval(estimator,
# Terminate eval job when final checkpoint is reached
# Terminate eval job when final checkpoint is reached
current_step
=
int
(
os
.
path
.
basename
(
ckpt
).
split
(
'-'
)[
1
])
current_step
=
int
(
os
.
path
.
basename
(
ckpt
).
split
(
'-'
)[
1
])
yield
(
current_step
,
eval_results
)
if
current_step
>=
train_steps
:
if
current_step
>=
train_steps
:
tf
.
logging
.
info
(
tf
.
logging
.
info
(
'Evaluation finished after training step %d'
%
current_step
)
'Evaluation finished after training step %d'
%
current_step
)
...
@@ -1021,6 +1025,30 @@ def continuous_eval(estimator,
...
@@ -1021,6 +1025,30 @@ def continuous_eval(estimator,
'Checkpoint %s no longer exists, skipping checkpoint'
%
ckpt
)
'Checkpoint %s no longer exists, skipping checkpoint'
%
ckpt
)
def continuous_eval(estimator,
                    model_dir,
                    input_fn,
                    train_steps,
                    name,
                    max_retries=0):
  """Runs continuous evaluation over checkpoints and logs each result.

  Thin wrapper that drains `continuous_eval_generator`, logging every
  (step, eval_results) pair the generator yields until evaluation stops.

  Args:
    estimator: Estimator object to use for evaluation.
    model_dir: Model directory to read checkpoints for continuous evaluation.
    input_fn: Input function to use for evaluation.
    train_steps: Number of training steps. This is used to infer the last
      checkpoint and stop evaluation loop.
    name: Namescope for eval summary.
    max_retries: Maximum number of times to retry the evaluation on encountering
      a tf.errors.InvalidArgumentError. If negative, will always retry the
      evaluation.
  """
  results_stream = continuous_eval_generator(estimator, model_dir, input_fn,
                                             train_steps, name, max_retries)
  for step, results in results_stream:
    tf.logging.info('Step %s, Eval results: %s', step, results)
def
populate_experiment
(
run_config
,
def
populate_experiment
(
run_config
,
hparams
,
hparams
,
pipeline_config_path
,
pipeline_config_path
,
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment