ModelZoo / ResNet50_tensorflow · Commits

Commit 8f345563
Authored Sep 29, 2020 by Chenkai Kuang; committed by A. Unique TensorFlower, Sep 29, 2020

Internal change

PiperOrigin-RevId: 334517779
Parent: 2986bcaf
Showing 11 changed files with 16 additions and 23 deletions (+16 −23)
official/modeling/training/distributed_executor.py (+1 −2)
official/nlp/bert/model_training_utils.py (+1 −2)
official/nlp/bert/run_classifier.py (+1 −2)
official/nlp/bert/run_squad_helper.py (+1 −2)
official/nlp/nhnet/input_pipeline.py (+2 −2)
official/nlp/tasks/sentence_prediction_test.py (+1 −1)
official/nlp/tasks/tagging_test.py (+1 −1)
official/nlp/transformer/transformer_main.py (+2 −3)
official/nlp/xlnet/data_utils.py (+4 −5)
official/vision/image_classification/dataset_factory.py (+1 −2)
official/vision/image_classification/resnet/resnet_runnable.py (+1 −1)
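Every hunk in this commit is the same mechanical migration: `tf.distribute.Strategy.experimental_distribute_datasets_from_function` was promoted to the stable name `distribute_datasets_from_function` in TensorFlow 2.x, and the call sites here move to the new spelling. A minimal before/after sketch, assuming a `MirroredStrategy` and a hypothetical `dataset_fn`:

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

def dataset_fn(ctx: tf.distribute.InputContext) -> tf.data.Dataset:
  # Hypothetical: build the per-replica dataset. `ctx` carries sharding info
  # and can convert a global batch size to a per-replica one.
  per_replica_batch = ctx.get_per_replica_batch_size(global_batch_size=64)
  return tf.data.Dataset.range(1024).batch(per_replica_batch)

# Before this commit (deprecated spelling):
#   dist_ds = strategy.experimental_distribute_datasets_from_function(dataset_fn)
# After this commit:
dist_ds = strategy.distribute_datasets_from_function(dataset_fn)
iterator = iter(dist_ds)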
official/modeling/training/distributed_executor.py

@@ -207,8 +207,7 @@ class DistributedExecutor(object):
     # across workers. Since Dataset instance cannot be cloned in eager mode,
     # we instead pass callable that returns a dataset.
     if self._is_multi_host:
-      return iter(
-          strategy.experimental_distribute_datasets_from_function(input_fn))
+      return iter(strategy.distribute_datasets_from_function(input_fn))
     else:
       input_data = input_fn()
       return iter(strategy.experimental_distribute_dataset(input_data))
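The multi-host branch passes a callable instead of a built dataset because, as the comment notes, a `Dataset` instance cannot be cloned across workers in eager mode; each input pipeline invokes the function itself with a `tf.distribute.InputContext`. A sketch of what such an `input_fn` can look like (the file pattern and parameters are hypothetical):

import tensorflow as tf

def input_fn(ctx: tf.distribute.InputContext) -> tf.data.Dataset:
  # Each worker's input pipeline runs this function independently.
  files = tf.data.Dataset.list_files('/data/train-*.tfrecord', shuffle=False)
  # Shard the files so each pipeline reads a disjoint slice.
  files = files.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
  dataset = files.interleave(tf.data.TFRecordDataset, cycle_length=4)
  return dataset.batch(ctx.get_per_replica_batch_size(global_batch_size=64))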
official/nlp/bert/model_training_utils.py

@@ -65,8 +65,7 @@ def _get_input_iterator(input_fn, strategy):
   # pass callable that returns a dataset.
   if not callable(input_fn):
     raise ValueError('`input_fn` should be a closure that returns a dataset.')
-  iterator = iter(
-      strategy.experimental_distribute_datasets_from_function(input_fn))
+  iterator = iter(strategy.distribute_datasets_from_function(input_fn))
   return iterator
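Note that `distribute_datasets_from_function` calls the closure with a `tf.distribute.InputContext`, so callers of `_get_input_iterator` pass a function of the form `def _dataset_fn(ctx=None): ...` rather than a dataset instance; a bare `tf.data.Dataset` trips the `ValueError` above. A hypothetical call site:

def _dataset_fn(ctx=None):
  # Rebuild the dataset per input pipeline; `make_dataset` is hypothetical.
  return make_dataset(batch_size=32, input_context=ctx)

train_iterator = _get_input_iterator(_dataset_fn, strategy)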
official/nlp/bert/run_classifier.py

@@ -325,8 +325,7 @@ def get_predictions_and_labels(strategy,
         tf.experimental.async_clear_error()
     return preds, golds

-  test_iter = iter(
-      strategy.experimental_distribute_datasets_from_function(eval_input_fn))
+  test_iter = iter(strategy.distribute_datasets_from_function(eval_input_fn))
   predictions, labels = _run_evaluation(test_iter)
   return predictions, labels
official/nlp/bert/run_squad_helper.py

@@ -186,8 +186,7 @@ def predict_squad_customized(strategy, input_meta_data, predict_tfrecord_path,
       FLAGS.predict_batch_size,
       is_training=False)
   predict_iterator = iter(
-      strategy.experimental_distribute_datasets_from_function(
-          predict_dataset_fn))
+      strategy.distribute_datasets_from_function(predict_dataset_fn))

   @tf.function
   def predict_step(iterator):
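For context, `predict_step` consumes the distributed iterator inside a `tf.function` via `strategy.run`; a condensed sketch of that pattern (the replica function and `model` are hypothetical, and `strategy` is assumed in scope):

@tf.function
def predict_step(iterator):
  def _replica_fn(inputs):
    # Hypothetical per-replica forward pass.
    return model(inputs, training=False)
  # next(iterator) yields per-replica values; strategy.run fans them out.
  outputs = strategy.run(_replica_fn, args=(next(iterator),))
  return tf.nest.map_structure(strategy.experimental_local_results, outputs)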
official/nlp/nhnet/input_pipeline.py

@@ -230,7 +230,7 @@ def get_input_dataset(input_file_pattern,
             strategy.num_replicas_in_sync))
   # As auto rebatching is not supported in
-  # `experimental_distribute_datasets_from_function()` API, which is
+  # `distribute_datasets_from_function()` API, which is
   # required when cloning dataset to multiple workers in eager mode,
   # we use per-replica batch size.
   batch_size = int(batch_size / strategy.num_replicas_in_sync)

@@ -249,6 +249,6 @@ def get_input_dataset(input_file_pattern,
         input_pipeline_context=ctx)

   if use_dataset_fn:
-    return strategy.experimental_distribute_datasets_from_function(_dataset_fn)
+    return strategy.distribute_datasets_from_function(_dataset_fn)
   else:
     return strategy.experimental_distribute_dataset(_dataset_fn())
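The recurring comment about auto rebatching explains the division: `distribute_datasets_from_function` hands each replica exactly the dataset the function builds, with no automatic splitting of a global batch (unlike `experimental_distribute_dataset`). Hence the up-front conversion to a per-replica batch size; for example, with a global batch of 64 and 8 replicas in sync:

# Sketch, assuming `strategy` is in scope with 8 replicas in sync.
batch_size = 64
batch_size = int(batch_size / strategy.num_replicas_in_sync)  # 64 / 8 = 8 per replica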
official/nlp/tasks/sentence_prediction_test.py

@@ -80,7 +80,7 @@ class SentencePredictionTaskTest(tf.test.TestCase, parameterized.TestCase):
     metrics = task.build_metrics()

     strategy = tf.distribute.get_strategy()
-    dataset = strategy.experimental_distribute_datasets_from_function(
+    dataset = strategy.distribute_datasets_from_function(
         functools.partial(task.build_inputs, config.train_data))

     iterator = iter(dataset)
official/nlp/tasks/tagging_test.py

@@ -66,7 +66,7 @@ class TaggingTest(tf.test.TestCase):
     metrics = task.build_metrics()

     strategy = tf.distribute.get_strategy()
-    dataset = strategy.experimental_distribute_datasets_from_function(
+    dataset = strategy.distribute_datasets_from_function(
         functools.partial(task.build_inputs, config.train_data))

     iterator = iter(dataset)
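In both tests, `functools.partial` pre-binds the data config so the strategy can still append the `tf.distribute.InputContext` when it invokes the function; the distributed call is effectively equivalent to this sketch:

# Equivalent expansion of the functools.partial above (sketch):
dataset = strategy.distribute_datasets_from_function(
    lambda ctx: task.build_inputs(config.train_data, ctx))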
official/nlp/transformer/transformer_main.py

@@ -227,12 +227,11 @@ class TransformerTask(object):
     if self.use_tpu:
       # Different from experimental_distribute_dataset,
-      # experimental_distribute_datasets_from_function requires
+      # distribute_datasets_from_function requires
       # per-replica/local batch size.
       params["batch_size"] /= self.distribution_strategy.num_replicas_in_sync
       train_ds = (
-          self.distribution_strategy
-          .experimental_distribute_datasets_from_function(
+          self.distribution_strategy.distribute_datasets_from_function(
               lambda ctx: data_pipeline.train_input_fn(params, ctx)))
     else:
       train_ds = data_pipeline.train_input_fn(params)
official/nlp/xlnet/data_utils.py

@@ -167,8 +167,7 @@ def get_input_iterator(input_fn, strategy):
   # pass callable that returns a dataset.
   input_data = input_fn()
   if callable(input_data):
-    iterator = iter(
-        strategy.experimental_distribute_datasets_from_function(input_data))
+    iterator = iter(strategy.distribute_datasets_from_function(input_data))
   else:
     iterator = iter(strategy.experimental_distribute_dataset(input_data))
   return iterator

@@ -189,7 +188,7 @@ def get_classification_input_data(batch_size, seq_len, strategy, is_training,
           strategy.num_replicas_in_sync))
   # As auto rebatching is not supported in
-  # `experimental_distribute_datasets_from_function()` API, which is
+  # `distribute_datasets_from_function()` API, which is
   # required when cloning dataset to multiple workers in eager mode,
   # we use per-replica batch size.
   batch_size = int(batch_size / strategy.num_replicas_in_sync)

@@ -222,7 +221,7 @@ def get_squad_input_data(batch_size, seq_len, q_len, strategy, is_training,
           strategy.num_replicas_in_sync))
   # As auto rebatching is not supported in
-  # `experimental_distribute_datasets_from_function()` API, which is
+  # `distribute_datasets_from_function()` API, which is
   # required when cloning dataset to multiple workers in eager mode,
   # we use per-replica batch size.
   batch_size = int(batch_size / strategy.num_replicas_in_sync)

@@ -624,7 +623,7 @@ def get_pretrain_input_data(batch_size,
           strategy.num_replicas_in_sync))
   # As auto rebatching is not supported in
-  # `experimental_distribute_datasets_from_function()` API, which is
+  # `distribute_datasets_from_function()` API, which is
   # required when cloning dataset to multiple workers in eager mode,
   # we use per-replica batch size.
   batch_size = int(batch_size / strategy.num_replicas_in_sync)
official/vision/image_classification/dataset_factory.py

@@ -297,8 +297,7 @@ class DatasetBuilder:
           'Passed a strategy with %d devices, but expected'
           '%d devices.', strategy.num_replicas_in_sync,
           self.config.num_devices)
-      dataset = strategy.experimental_distribute_datasets_from_function(
-          self._build)
+      dataset = strategy.distribute_datasets_from_function(self._build)
     else:
       dataset = self._build()
official/vision/image_classification/resnet/resnet_runnable.py

@@ -42,7 +42,7 @@ class ResnetRunnable(orbit.StandardTrainer, orbit.StandardEvaluator):
             self.strategy.num_replicas_in_sync))
     # As auto rebatching is not supported in
-    # `experimental_distribute_datasets_from_function()` API, which is
+    # `distribute_datasets_from_function()` API, which is
    # required when cloning dataset to multiple workers in eager mode,
    # we use per-replica batch size.
    self.batch_size = int(batch_size / self.strategy.num_replicas_in_sync)