ModelZoo / ResNet50_tensorflow · Commits

Commit 6a67bfdc
Authored Mar 22, 2021 by Vighnesh Birodkar
Committed by TF Object Detection Team on Mar 22, 2021

Internal change.

PiperOrigin-RevId: 364349645
Parent: 0c48b89f
Showing 2 changed files with 33 additions and 29 deletions
research/object_detection/builders/model_builder.py   +4  -0
research/object_detection/model_lib_v2.py             +29 -29
research/object_detection/builders/model_builder.py  (view file @ 6a67bfdc)
...
@@ -17,6 +17,9 @@
 import functools
 import sys
+from absl import logging
 from object_detection.builders import anchor_generator_builder
 from object_detection.builders import box_coder_builder
 from object_detection.builders import box_predictor_builder
...
@@ -1064,6 +1067,7 @@ def _build_center_net_model(center_net_config, is_training, add_summaries):
   if center_net_config.HasField('post_processing'):
     non_max_suppression_fn, _ = post_processing_builder.build(
         center_net_config.post_processing)
   return center_net_meta_arch.CenterNetMetaArch(
       is_training=is_training,
       add_summaries=add_summaries,
...
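Note: the only addition visible in the model_builder.py hunks above is the new "from absl import logging" import; the remaining added lines are collapsed in this view and presumably use it. For reference only, absl logging is typically used as in the illustrative snippet below (this snippet is not part of the commit):

# Illustrative only -- not part of the commit shown above.
from absl import logging

logging.set_verbosity(logging.INFO)
logging.info('Building model of type: %s', 'center_net')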
research/object_detection/model_lib_v2.py  (view file @ 6a67bfdc)
...
@@ -351,32 +351,32 @@ def load_fine_tune_checkpoint(
   features, labels = iter(input_dataset).next()
-  @tf.function
-  def _dummy_computation_fn(features, labels):
-    model._is_training = False  # pylint: disable=protected-access
-    tf.keras.backend.set_learning_phase(False)
-    labels = model_lib.unstack_batch(
-        labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
-    return _compute_losses_and_predictions_dicts(
-        model,
-        features,
-        labels)
-  strategy = tf.compat.v2.distribute.get_strategy()
-  if hasattr(tf.distribute.Strategy, 'run'):
-    strategy.run(
-        _dummy_computation_fn, args=(
-            features,
-            labels,
-        ))
-  else:
-    strategy.experimental_run_v2(
-        _dummy_computation_fn, args=(
-            features,
-            labels,
-        ))
+  # @tf.function
+  # def _dummy_computation_fn(features, labels):
+  #   model._is_training = False  # pylint: disable=protected-access
+  #   tf.keras.backend.set_learning_phase(False)
+  #   labels = model_lib.unstack_batch(
+  #       labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
+  #   return _compute_losses_and_predictions_dicts(
+  #       model,
+  #       features,
+  #       labels)
+  # strategy = tf.compat.v2.distribute.get_strategy()
+  # if hasattr(tf.distribute.Strategy, 'run'):
+  #   strategy.run(
+  #       _dummy_computation_fn, args=(
+  #           features,
+  #           labels,
+  #       ))
+  # else:
+  #   strategy.experimental_run_v2(
+  #       _dummy_computation_fn, args=(
+  #           features,
+  #           labels,
+  #       ))
   restore_from_objects_dict = model.restore_from_objects(
       fine_tune_checkpoint_type=checkpoint_type)
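The block commented out above was a compatibility shim: newer TensorFlow 2.x releases expose Strategy.run, while older ones only had experimental_run_v2, so the code probed with hasattr before dispatching. Below is a minimal, self-contained sketch of that dispatch pattern; _per_replica_fn is a trivial stand-in for the real _dummy_computation_fn, not part of the commit.

# Minimal sketch of the hasattr-based dispatch used by the commented-out code.
import tensorflow as tf

def _per_replica_fn(features):
  # Trivial per-replica computation (stand-in for the real dummy forward pass).
  return tf.reduce_sum(features)

strategy = tf.compat.v2.distribute.get_strategy()
features = tf.zeros([8, 4], dtype=tf.float32)

if hasattr(tf.distribute.Strategy, 'run'):
  # TF 2.2+ exposes Strategy.run.
  strategy.run(_per_replica_fn, args=(features,))
else:
  # Older TF 2.x releases only offered the experimental name.
  strategy.experimental_run_v2(_per_replica_fn, args=(features,))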
...
@@ -1084,9 +1084,9 @@ def eval_continuously(
   # model and all its variables have been properly constructed. Specifically,
   # this is currently necessary prior to (potentially) creating shadow copies
   # of the model variables for the EMA optimizer.
-  dummy_image, dummy_shapes = detection_model.preprocess(
-      tf.zeros([1, 512, 512, 3], dtype=tf.float32))
-  dummy_prediction_dict = detection_model.predict(dummy_image, dummy_shapes)
+  # dummy_image, dummy_shapes = detection_model.preprocess(
+  #     tf.zeros([1, 512, 512, 3], dtype=tf.float32))
+  # dummy_prediction_dict = detection_model.predict(dummy_image, dummy_shapes)
   eval_input = strategy.experimental_distribute_dataset(
       inputs.eval_input(
...
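The lines commented out in the eval_continuously hunk implemented a common warm-up pattern: run one forward pass on zeros so that all model variables exist before a checkpoint restore (and before any EMA shadow copies are created). A rough, self-contained sketch of that pattern follows, using a plain Keras model and a hypothetical checkpoint path as stand-ins for the Object Detection API's detection_model; it is not the commit's code.

# Rough sketch of the warm-up-then-restore pattern; the Keras model and the
# checkpoint path are stand-ins, not part of the commit.
import tensorflow as tf

model = tf.keras.Sequential(
    [tf.keras.layers.Conv2D(8, 3, input_shape=(512, 512, 3))])

# One dummy batch of zeros forces every variable to be created, mirroring the
# detection_model.preprocess/predict calls shown commented out above.
_ = model(tf.zeros([1, 512, 512, 3], dtype=tf.float32))

ckpt = tf.train.Checkpoint(model=model)
# ckpt.restore('/path/to/checkpoint')  # hypothetical path supplied by the caller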