ModelZoo / ResNet50_tensorflow · Commits

Commit aac33549
Authored Mar 25, 2022 by Hongkun Yu
Committed by A. Unique TensorFlower on Mar 25, 2022

Internal change

PiperOrigin-RevId: 437358569
Parent: b6fcc07d
Changes: 1 changed file with 34 additions and 12 deletions

official/core/input_reader.py  (+34, -12)
@@ -160,16 +160,38 @@ def _read_tfds(tfds_builder: tfds.core.DatasetBuilder,
   """Reads a dataset from tfds."""
   # No op if exist.
   tfds_builder.download_and_prepare()
-  read_config = tfds.ReadConfig(
-      interleave_cycle_length=cycle_length,
-      interleave_block_length=block_length,
-      input_context=input_context,
-      shuffle_seed=seed)
-  decoders = {}
-  if tfds_skip_decoding_feature:
-    for skip_feature in tfds_skip_decoding_feature.split(','):
-      decoders[skip_feature.strip()] = tfds.decode.SkipDecoding()
-  dataset = tfds_builder.as_dataset(
-      split=tfds_split,
-      shuffle_files=is_training,
+  decoders = {}
+  if tfds_skip_decoding_feature:
+    for skip_feature in tfds_skip_decoding_feature.split(','):
+      decoders[skip_feature.strip()] = tfds.decode.SkipDecoding()
+  if tfds_builder.info.splits:
+    num_shards = len(tfds_builder.info.splits[tfds_split].file_instructions)
+  else:
+    # The tfds mock path often does not provide splits.
+    num_shards = 1
+  if input_context and num_shards < input_context.num_input_pipelines:
+    # The number of files in the dataset split is smaller than the number of
+    # input pipelines. We read the entire dataset first and then shard in the
+    # host memory.
+    read_config = tfds.ReadConfig(
+        interleave_cycle_length=cycle_length,
+        interleave_block_length=block_length,
+        input_context=None,
+        shuffle_seed=seed)
+    dataset = tfds_builder.as_dataset(
+        split=tfds_split,
+        shuffle_files=is_training,
+        as_supervised=tfds_as_supervised,
+        decoders=decoders,
+        read_config=read_config)
+    dataset = dataset.shard(input_context.num_input_pipelines,
+                            input_context.input_pipeline_id)
+  else:
+    read_config = tfds.ReadConfig(
+        interleave_cycle_length=cycle_length,
+        interleave_block_length=block_length,
+        input_context=input_context,
+        shuffle_seed=seed)
+    dataset = tfds_builder.as_dataset(
+        split=tfds_split,
+        shuffle_files=is_training,
...
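The substantive part of this change is the fallback sharding path: when the TFDS split is backed by fewer files than there are input pipelines, per-file sharding through the input_context cannot give each pipeline distinct data, so every pipeline reads the whole split and then shards element-wise in host memory via dataset.shard(). Below is a minimal standalone sketch of how tf.data.Dataset.shard(num_shards, index) distributes elements; the toy range dataset and the pipeline count are made up for illustration and are not part of the commit.

import tensorflow as tf

# Hypothetical setup: 4 input pipelines (e.g. one per host), but the split is
# backed by a single file, so file-level sharding cannot spread data evenly.
num_input_pipelines = 4

# Toy stand-in for the dataset that every pipeline reads in full.
dataset = tf.data.Dataset.range(8)

# Host-memory sharding: pipeline i keeps every 4th element starting at index i.
for input_pipeline_id in range(num_input_pipelines):
  shard = dataset.shard(num_input_pipelines, input_pipeline_id)
  print(input_pipeline_id, list(shard.as_numpy_iterator()))

# Prints:
# 0 [0, 4]
# 1 [1, 5]
# 2 [2, 6]
# 3 [3, 7]

Every pipeline still reads all elements before discarding the ones it does not keep, which is why the in-file comment describes this as sharding "in the host memory": it trades redundant reading for an even split when there are too few files.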