ModelZoo / ResNet50_tensorflow / Commits

Commit 35daa566, authored Oct 11, 2019 by Hongkun Yu, committed by GitHub on Oct 11, 2019

Revert "Revert "Update usage of tf.contrib.data to tf.data.experimental" (#7654)"

This reverts commit b4e560dc.

Parent: b4e560dc
Showing 4 changed files with 9 additions and 8 deletions (+9 -8):
official/r1/resnet/resnet_run_loop.py                             +1 -1
official/transformer/utils/dataset.py                             +6 -5
official/vision/detection/evaluation/coco_utils.py                +1 -1
official/vision/image_classification/imagenet_preprocessing.py    +1 -1
official/r1/resnet/resnet_run_loop.py

@@ -110,7 +110,7 @@ def process_record_dataset(dataset,
   # Operations between the final prefetch and the get_next call to the iterator
   # will happen synchronously during run time. We prefetch here again to
   # background all of the above processing work and keep it out of the
-  # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
+  # critical training path. Setting buffer_size to tf.data.experimental.AUTOTUNE
   # allows DistributionStrategies to adjust how many batches to fetch based
   # on how many devices are present.
   dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
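The comment in this hunk describes the pattern being renamed; as a minimal runnable sketch (synthetic data, not the repository's real pipeline), a final autotuned prefetch looks like this:

import tensorflow as tf

# prefetch with buffer_size=tf.data.experimental.AUTOTUNE lets the tf.data
# runtime choose how many batches to buffer, so the preprocessing above
# overlaps with the training step instead of blocking it.
dataset = tf.data.Dataset.range(1024)
dataset = dataset.map(lambda x: x * 2)   # stand-in for real record processing
dataset = dataset.batch(32)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)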
official/transformer/utils/dataset.py
@@ -183,7 +183,7 @@ def _batch_examples(dataset, batch_size, max_length):
     # lengths as well. Resulting lengths of inputs and targets can differ.
     return grouped_dataset.padded_batch(
         bucket_batch_size, ([None], [None]))
-  return dataset.apply(tf.contrib.data.group_by_window(
+  return dataset.apply(tf.data.experimental.group_by_window(
       key_func=example_to_bucket_id,
       reduce_func=batching_fn,
       window_size=None,
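For context, here is a minimal runnable sketch of tf.data.experimental.group_by_window, the API this hunk migrates to. The key and reduce functions below are hypothetical stand-ins for the module's example_to_bucket_id and batching_fn, and a fixed window_size stands in for the window-size logic elided above:

import tensorflow as tf

def example_to_bucket_id(seq):
  # Hypothetical bucketing: "short" (bucket 0) vs. "long" (bucket 1).
  # key_func must return an int64 scalar.
  return tf.cast(tf.size(seq) > 4, tf.int64)

def batching_fn(bucket_id, grouped_dataset):
  # Pad and batch each bucket independently, as the real code does.
  return grouped_dataset.padded_batch(4, padded_shapes=[None])

dataset = tf.data.Dataset.range(1, 9).map(
    lambda n: tf.ones([n], dtype=tf.int64))   # sequences of length 1..8
dataset = dataset.apply(tf.data.experimental.group_by_window(
    key_func=example_to_bucket_id,
    reduce_func=batching_fn,
    window_size=4))

Grouping by length before padding keeps the padding overhead low: each batch only pads to the longest sequence in its own bucket, not in the whole dataset.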
@@ -223,7 +223,7 @@ def _read_and_batch_from_files(
   # Read files and interleave results. When training, the order of the examples
   # will be non-deterministic.
-  dataset = dataset.apply(tf.contrib.data.parallel_interleave(
+  dataset = dataset.apply(tf.data.experimental.parallel_interleave(
       _load_records, sloppy=shuffle, cycle_length=num_parallel_calls))
   # Parse each tf.Example into a dictionary
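A minimal runnable sketch of tf.data.experimental.parallel_interleave, which this hunk switches to. The _load_records below is a hypothetical stand-in for the module's TFRecord loader, and sloppy=True illustrates the trade the real code makes only when shuffle is on:

import tensorflow as tf

def _load_records(x):
  # Hypothetical stand-in for the real loader, which maps a filename to a
  # tf.data.TFRecordDataset; here each "file" is just a small range.
  return tf.data.Dataset.range(x, x + 3)

dataset = tf.data.Dataset.range(0, 100, 10)
# sloppy=True lets the interleave emit elements in whatever order they are
# ready, trading determinism for throughput (the real code passes
# sloppy=shuffle, so order only varies while training).
dataset = dataset.apply(tf.data.experimental.parallel_interleave(
    _load_records, sloppy=True, cycle_length=4))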
@@ -235,8 +235,9 @@ def _read_and_batch_from_files(
   dataset = dataset.filter(lambda x, y: _filter_max_length((x, y), max_length))
   if static_batch:
-    dataset = dataset.apply(tf.contrib.data.padded_batch_and_drop_remainder(
-        batch_size // max_length, ([max_length], [max_length])))
+    dataset = dataset.padded_batch(
+        batch_size // max_length, ([max_length], [max_length]),
+        drop_remainder=True)
   else:
     # Group and batch such that each batch has examples of similar length.
     dataset = _batch_examples(dataset, batch_size, max_length)
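This hunk replaces the deprecated tf.contrib.data.padded_batch_and_drop_remainder with the built-in padded_batch plus drop_remainder=True. A minimal runnable sketch of the new form, on synthetic variable-length vectors:

import tensorflow as tf

# Five vectors of lengths 1..5; each batch pads to length 8, and the
# incomplete final batch (the lone fifth element) is dropped.
dataset = tf.data.Dataset.range(1, 6).map(
    lambda n: tf.ones([n], dtype=tf.int64))
dataset = dataset.padded_batch(2, padded_shapes=[8], drop_remainder=True)
# Yields two dense [2, 8] batches.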
@@ -244,7 +245,7 @@ def _read_and_batch_from_files(
   dataset = dataset.repeat(repeat)
   # Prefetch the next element to improve speed of input pipeline.
-  dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
+  dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
   return dataset
official/vision/detection/evaluation/coco_utils.py

@@ -318,7 +318,7 @@ class COCOGroundtruthGenerator(object):
         cycle_length=32, sloppy=False))
     dataset = dataset.map(self._parse_single_example, num_parallel_calls=64)
-    dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
+    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
     dataset = dataset.batch(1, drop_remainder=False)
     return dataset
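A runnable sketch of the evaluation-pipeline shape this hunk touches, with in-memory data standing in for the TFRecord shards and for self._parse_single_example: a deterministic interleave, a parallel parse, the autotuned prefetch the commit restores, and per-example batching:

import tensorflow as tf

shards = tf.data.Dataset.from_tensor_slices([[0, 1, 2], [10, 11, 12]])
# sloppy=False keeps the interleave order deterministic, which matters at
# evaluation time when predictions are matched against ground truth.
dataset = shards.apply(tf.data.experimental.parallel_interleave(
    tf.data.Dataset.from_tensor_slices, cycle_length=2, sloppy=False))
dataset = dataset.map(lambda x: x + 1, num_parallel_calls=4)  # stand-in parser
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(1, drop_remainder=False)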
official/vision/image_classification/imagenet_preprocessing.py

@@ -128,7 +128,7 @@ def process_record_dataset(dataset,
   # Operations between the final prefetch and the get_next call to the iterator
   # will happen synchronously during run time. We prefetch here again to
   # background all of the above processing work and keep it out of the
-  # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
+  # critical training path. Setting buffer_size to tf.data.experimental.AUTOTUNE
   # allows DistributionStrategies to adjust how many batches to fetch based
   # on how many devices are present.
   dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)