Commit 47bc1813 authored by syiming

Merge remote-tracking branch 'upstream/master' into add_multilevel_crop_and_resize

parents d8611151 b035a227
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Session hook for logging benchmark metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
class LoggingMetricHook(tf.estimator.LoggingTensorHook):
"""Hook to log benchmark metric information.
This hook is very similar to tf.estimator.LoggingTensorHook, which logs given
tensors every N local steps, every N seconds, or at the end. The metric
information will be logged to the given log_dir or via metric_logger in JSON
format, which can be consumed by a data analysis pipeline later.
Note that if `at_end` is True, `tensors` should not include any tensor
whose evaluation produces a side effect such as consuming additional inputs.
"""
def __init__(self, tensors, metric_logger=None,
every_n_iter=None, every_n_secs=None, at_end=False):
"""Initializer for LoggingMetricHook.
Args:
tensors: `dict` that maps string-valued tags to tensors/tensor names,
or `iterable` of tensors/tensor names.
metric_logger: instance of `BenchmarkLogger`, the benchmark logger that the
hook should use to write the log.
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
every_n_secs: `int` or `float`, print the values of `tensors` once every N
seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
provided.
at_end: `bool` specifying whether to print the values of `tensors` at the
end of the run.
Raises:
ValueError:
1. `every_n_iter` is non-positive, or
2. both or neither of `every_n_iter` and `every_n_secs` is provided, or
3. `metric_logger` is not provided.
"""
super(LoggingMetricHook, self).__init__(
tensors=tensors,
every_n_iter=every_n_iter,
every_n_secs=every_n_secs,
at_end=at_end)
if metric_logger is None:
raise ValueError("metric_logger should be provided.")
self._logger = metric_logger
def begin(self):
super(LoggingMetricHook, self).begin()
self._global_step_tensor = tf.compat.v1.train.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use LoggingMetricHook.")
if self._global_step_tensor.name not in self._current_tensors:
self._current_tensors[self._global_step_tensor.name] = (
self._global_step_tensor)
def after_run(self, unused_run_context, run_values):
# _should_trigger is an internal state that is populated in before_run, and it
# uses self._timer to determine whether the hook should trigger.
if self._should_trigger:
self._log_metric(run_values.results)
self._iter_count += 1
def end(self, session):
if self._log_at_end:
values = session.run(self._current_tensors)
self._log_metric(values)
def _log_metric(self, tensor_values):
self._timer.update_last_triggered_step(self._iter_count)
global_step = tensor_values[self._global_step_tensor.name]
# self._tag_order is populated during the init of LoggingTensorHook
for tag in self._tag_order:
self._logger.log_metric(tag, tensor_values[tag], global_step=global_step)
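# Example usage (an illustrative sketch only, not exercised by this module; the
# tag/tensor names and the `benchmark_logger` and `estimator` objects below are
# hypothetical; see metric_hook_test.py for concrete usage):
#
#   hook = LoggingMetricHook(
#       tensors={"learning_rate": "learning_rate"},  # tag -> tensor name
#       metric_logger=benchmark_logger,
#       every_n_iter=100)
#   estimator.train(input_fn=train_input_fn, hooks=[hook])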
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_hook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import time
import tensorflow as tf # pylint: disable=g-bad-import-order
from tensorflow.python.training import monitored_session # pylint: disable=g-bad-import-order
from official.r1.utils.logs import metric_hook
from official.r1.utils.logs import mock_lib
class LoggingMetricHookTest(tf.test.TestCase):
"""Tests for LoggingMetricHook."""
def setUp(self):
super(LoggingMetricHookTest, self).setUp()
self._log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self._logger = mock_lib.MockBenchmarkLogger()
def tearDown(self):
super(LoggingMetricHookTest, self).tearDown()
tf.io.gfile.rmtree(self.get_temp_dir())
def test_illegal_args(self):
with self.assertRaisesRegexp(ValueError, "nvalid every_n_iter"):
metric_hook.LoggingMetricHook(tensors=["t"], every_n_iter=0)
with self.assertRaisesRegexp(ValueError, "nvalid every_n_iter"):
metric_hook.LoggingMetricHook(tensors=["t"], every_n_iter=-10)
with self.assertRaisesRegexp(ValueError, "xactly one of"):
metric_hook.LoggingMetricHook(
tensors=["t"], every_n_iter=5, every_n_secs=5)
with self.assertRaisesRegexp(ValueError, "xactly one of"):
metric_hook.LoggingMetricHook(tensors=["t"])
with self.assertRaisesRegexp(ValueError, "metric_logger"):
metric_hook.LoggingMetricHook(tensors=["t"], every_n_iter=5)
def test_print_at_end_only(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
t = tf.constant(42.0, name="foo")
train_op = tf.constant(3)
hook = metric_hook.LoggingMetricHook(
tensors=[t.name], at_end=True, metric_logger=self._logger)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook]) # pylint: disable=protected-access
sess.run(tf.compat.v1.global_variables_initializer())
for _ in range(3):
mon_sess.run(train_op)
self.assertEqual(self._logger.logged_metric, [])
hook.end(sess)
self.assertEqual(len(self._logger.logged_metric), 1)
metric = self._logger.logged_metric[0]
self.assertRegexpMatches(metric["name"], "foo")
self.assertEqual(metric["value"], 42.0)
self.assertEqual(metric["unit"], None)
self.assertEqual(metric["global_step"], 0)
def test_global_step_not_found(self):
with tf.Graph().as_default():
t = tf.constant(42.0, name="foo")
hook = metric_hook.LoggingMetricHook(
tensors=[t.name], at_end=True, metric_logger=self._logger)
with self.assertRaisesRegexp(
RuntimeError, "should be created to use LoggingMetricHook."):
hook.begin()
def test_log_tensors(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
t1 = tf.constant(42.0, name="foo")
t2 = tf.constant(43.0, name="bar")
train_op = tf.constant(3)
hook = metric_hook.LoggingMetricHook(
tensors=[t1, t2], at_end=True, metric_logger=self._logger)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook]) # pylint: disable=protected-access
sess.run(tf.compat.v1.global_variables_initializer())
for _ in range(3):
mon_sess.run(train_op)
self.assertEqual(self._logger.logged_metric, [])
hook.end(sess)
self.assertEqual(len(self._logger.logged_metric), 2)
metric1 = self._logger.logged_metric[0]
self.assertRegexpMatches(str(metric1["name"]), "foo")
self.assertEqual(metric1["value"], 42.0)
self.assertEqual(metric1["unit"], None)
self.assertEqual(metric1["global_step"], 0)
metric2 = self._logger.logged_metric[1]
self.assertRegexpMatches(str(metric2["name"]), "bar")
self.assertEqual(metric2["value"], 43.0)
self.assertEqual(metric2["unit"], None)
self.assertEqual(metric2["global_step"], 0)
def _validate_print_every_n_steps(self, sess, at_end):
t = tf.constant(42.0, name="foo")
train_op = tf.constant(3)
hook = metric_hook.LoggingMetricHook(
tensors=[t.name], every_n_iter=10, at_end=at_end,
metric_logger=self._logger)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook]) # pylint: disable=protected-access
sess.run(tf.compat.v1.global_variables_initializer())
mon_sess.run(train_op)
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
for _ in range(3):
self._logger.logged_metric = []
for _ in range(9):
mon_sess.run(train_op)
# assertNotRegexpMatches is not supported by python 3.1 and later
self.assertEqual(str(self._logger.logged_metric).find(t.name), -1)
mon_sess.run(train_op)
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
# Add additional run to verify proper reset when called multiple times.
self._logger.logged_metric = []
mon_sess.run(train_op)
# assertNotRegexpMatches is not supported by python 3.1 and later
self.assertEqual(str(self._logger.logged_metric).find(t.name), -1)
self._logger.logged_metric = []
hook.end(sess)
if at_end:
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
else:
# assertNotRegexpMatches is not supported by python 3.1 and later
self.assertEqual(str(self._logger.logged_metric).find(t.name), -1)
def test_print_every_n_steps(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
self._validate_print_every_n_steps(sess, at_end=False)
# Verify proper reset.
self._validate_print_every_n_steps(sess, at_end=False)
def test_print_every_n_steps_and_end(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
self._validate_print_every_n_steps(sess, at_end=True)
# Verify proper reset.
self._validate_print_every_n_steps(sess, at_end=True)
def _validate_print_every_n_secs(self, sess, at_end):
t = tf.constant(42.0, name="foo")
train_op = tf.constant(3)
hook = metric_hook.LoggingMetricHook(
tensors=[t.name], every_n_secs=1.0, at_end=at_end,
metric_logger=self._logger)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook]) # pylint: disable=protected-access
sess.run(tf.compat.v1.global_variables_initializer())
mon_sess.run(train_op)
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
# assertNotRegexpMatches is not supported by python 3.1 and later
self._logger.logged_metric = []
mon_sess.run(train_op)
self.assertEqual(str(self._logger.logged_metric).find(t.name), -1)
time.sleep(1.0)
self._logger.logged_metric = []
mon_sess.run(train_op)
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
self._logger.logged_metric = []
hook.end(sess)
if at_end:
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
else:
# assertNotRegexpMatches is not supported by python 3.1 and later
self.assertEqual(str(self._logger.logged_metric).find(t.name), -1)
def test_print_every_n_secs(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
self._validate_print_every_n_secs(sess, at_end=False)
# Verify proper reset.
self._validate_print_every_n_secs(sess, at_end=False)
def test_print_every_n_secs_and_end(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
self._validate_print_every_n_secs(sess, at_end=True)
# Verify proper reset.
self._validate_print_every_n_secs(sess, at_end=True)
if __name__ == "__main__":
tf.test.main()
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for the mlperf logging utils.
MLPerf compliance logging is only desired under a limited set of circumstances.
This module is intended to keep users from needing to consider logging (or
install the module) unless they are performing mlperf runs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import json
import os
import re
import subprocess
import sys
from absl import logging
import typing
# pylint:disable=logging-format-interpolation
_MIN_VERSION = (0, 0, 10)
_STACK_OFFSET = 2
SUDO = "sudo" if os.geteuid() else ""
# This indirection is used in docker.
DROP_CACHE_LOC = os.getenv("DROP_CACHE_LOC", "/proc/sys/vm/drop_caches")
_NCF_PREFIX = "NCF_RAW_"
# TODO(robieta): move line parsing to mlperf util
_PREFIX = r"(?:{})?:::MLPv([0-9]+).([0-9]+).([0-9]+)".format(_NCF_PREFIX)
_BENCHMARK = r"([a-zA-Z0-9_]+)"
_TIMESTAMP = r"([0-9]+\.[0-9]+)"
_CALLSITE = r"\((.+):([0-9]+)\)"
_TAG = r"([a-zA-Z0-9_]+)"
_VALUE = r"(.*)"
ParsedLine = namedtuple("ParsedLine", ["version", "benchmark", "timestamp",
"callsite", "tag", "value"])
LINE_PATTERN = re.compile(
"^{prefix} {benchmark} {timestamp} {callsite} {tag}(: |$){value}?$".format(
prefix=_PREFIX, benchmark=_BENCHMARK, timestamp=_TIMESTAMP,
callsite=_CALLSITE, tag=_TAG, value=_VALUE))
def parse_line(line): # type: (str) -> typing.Optional[ParsedLine]
match = LINE_PATTERN.match(line.strip())
if not match:
return
major, minor, micro, benchmark, timestamp = match.groups()[:5]
call_file, call_line, tag, _, value = match.groups()[5:]
return ParsedLine(version=(int(major), int(minor), int(micro)),
benchmark=benchmark, timestamp=timestamp,
callsite=(call_file, call_line), tag=tag, value=value)
def unparse_line(parsed_line): # type: (ParsedLine) -> str
version_str = "{}.{}.{}".format(*parsed_line.version)
callsite_str = "({}:{})".format(*parsed_line.callsite)
value_str = ": {}".format(parsed_line.value) if parsed_line.value else ""
return ":::MLPv{} {} {} {} {} {}".format(
version_str, parsed_line.benchmark, parsed_line.timestamp, callsite_str,
parsed_line.tag, value_str)
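# Illustrative round trip (the log line below is hypothetical and shown only to
# document the expected format):
#
#   line = ":::MLPv0.5.0 ncf 1538678136.3 (ncf_main.py:62) run_start"
#   parsed = parse_line(line)
#   # parsed.version == (0, 5, 0), parsed.benchmark == "ncf", parsed.tag == "run_start"
#   new_line = unparse_line(parsed)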
def get_mlperf_log():
"""Shielded import of mlperf_log module."""
try:
import mlperf_compliance
def test_mlperf_log_pip_version():
"""Check that mlperf_compliance is up to date."""
import pkg_resources
version = pkg_resources.get_distribution("mlperf_compliance")
version = tuple(int(i) for i in version.version.split("."))
if version < _MIN_VERSION:
logging.warning("mlperf_compliance is version {}, must be >= {}".format(
".".join([str(i) for i in version]),
".".join([str(i) for i in _MIN_VERSION])))
raise ImportError
return mlperf_compliance.mlperf_log
mlperf_log = test_mlperf_log_pip_version()
except ImportError:
mlperf_log = None
return mlperf_log
class Logger(object):
"""MLPerf logger indirection class.
This logger only logs for MLPerf runs, and prevents various errors associated
with not having the mlperf_compliance package installed.
"""
class Tags(object):
def __init__(self, mlperf_log):
self._enabled = False
self._mlperf_log = mlperf_log
def __getattr__(self, item):
if self._mlperf_log is None or not self._enabled:
return
return getattr(self._mlperf_log, item)
def __init__(self):
self._enabled = False
self._mlperf_log = get_mlperf_log()
self.tags = self.Tags(self._mlperf_log)
def __call__(self, enable=False):
if enable and self._mlperf_log is None:
raise ImportError("MLPerf logging was requested, but mlperf_compliance "
"module could not be loaded.")
self._enabled = enable
self.tags._enabled = enable
return self
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
self._enabled = False
self.tags._enabled = False
@property
def log_file(self):
if self._mlperf_log is None:
return
return self._mlperf_log.LOG_FILE
@property
def enabled(self):
return self._enabled
def ncf_print(self, key, value=None, stack_offset=_STACK_OFFSET,
deferred=False, extra_print=False, prefix=_NCF_PREFIX):
if self._mlperf_log is None or not self.enabled:
return
self._mlperf_log.ncf_print(key=key, value=value, stack_offset=stack_offset,
deferred=deferred, extra_print=extra_print,
prefix=prefix)
def set_ncf_root(self, path):
if self._mlperf_log is None:
return
self._mlperf_log.ROOT_DIR_NCF = path
LOGGER = Logger()
ncf_print, set_ncf_root = LOGGER.ncf_print, LOGGER.set_ncf_root
TAGS = LOGGER.tags
def clear_system_caches():
if not LOGGER.enabled:
return
ret_code = subprocess.call(
["sync && echo 3 | {} tee {}".format(SUDO, DROP_CACHE_LOC)],
shell=True)
if ret_code:
raise ValueError("Failed to clear caches")
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
with LOGGER(True):
ncf_print(key=TAGS.RUN_START)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions specific to running TensorFlow on TPUs."""
import tensorflow as tf
# "local" is a magic word in the TPU cluster resolver; it informs the resolver
# to use the local CPU as the compute device. This is useful for testing and
# debugging; the code flow is ostensibly identical, but without the need to
# actually have a TPU on the other end.
LOCAL = "local"
def construct_scalar_host_call(metric_dict, model_dir, prefix=""):
"""Construct a host call to log scalars when training on TPU.
Args:
metric_dict: A dict of the tensors to be logged.
model_dir: The location to write the summary.
prefix: The prefix (if any) to prepend to the metric names.
Returns:
A tuple of (function, args_to_be_passed_to_said_function)
"""
# type: (dict, str) -> (function, list)
metric_names = list(metric_dict.keys())
def host_call_fn(global_step, *args):
"""Training host call. Creates scalar summaries for training metrics.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the
model to the `metric_fn`, provide them as part of the `host_call`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `host_call`.
Args:
global_step: `Tensor` with shape `[batch]` for the global step.
*args: Remaining tensors to log.
Returns:
List of summary ops to run on the CPU host.
"""
step = global_step[0]
with tf.compat.v1.summary.create_file_writer(
logdir=model_dir, filename_suffix=".host_call").as_default():
with tf.compat.v1.summary.always_record_summaries():
for i, name in enumerate(metric_names):
tf.compat.v1.summary.scalar(prefix + name, args[i][0], step=step)
return tf.compat.v1.summary.all_summary_ops()
# To log the current learning rate and gradient norm for TensorBoard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
global_step_tensor = tf.reshape(
tf.compat.v1.train.get_or_create_global_step(), [1])
other_tensors = [tf.reshape(metric_dict[key], [1]) for key in metric_names]
return host_call_fn, [global_step_tensor] + other_tensors
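# Illustrative sketch (assumptions: a TF 1.x TPU model_fn with `learning_rate`,
# `loss`, `train_op`, `mode`, and `model_dir` in scope). The returned pair is
# typically passed as the `host_call` of a TPUEstimatorSpec:
#
#   host_call = construct_scalar_host_call(
#       metric_dict={"learning_rate": learning_rate}, model_dir=model_dir)
#   spec = tf.contrib.tpu.TPUEstimatorSpec(
#       mode=mode, loss=loss, train_op=train_op, host_call=host_call)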
def embedding_matmul(embedding_table, values, mask, name="embedding_matmul"):
"""Performs embedding lookup via a matmul.
The matrix to be multiplied by the embedding table Tensor is constructed
via an implementation of scatter based on broadcasting embedding indices
and performing an equality comparison against a broadcasted
range(num_embedding_table_rows). All masked positions will produce an
embedding vector of zeros.
Args:
embedding_table: Tensor of embedding table.
Rank 2 (table_size x embedding dim)
values: Tensor of embedding indices. Rank 2 (batch x n_indices)
mask: Tensor of mask / weights. Rank 2 (batch x n_indices)
name: Optional name scope for created ops
Returns:
Rank 3 tensor of embedding vectors.
"""
with tf.name_scope(name):
n_embeddings = embedding_table.get_shape().as_list()[0]
batch_size, padded_size = values.shape.as_list()
emb_idcs = tf.tile(
tf.reshape(values, (batch_size, padded_size, 1)), (1, 1, n_embeddings))
emb_weights = tf.tile(
tf.reshape(mask, (batch_size, padded_size, 1)), (1, 1, n_embeddings))
col_idcs = tf.tile(
tf.reshape(tf.range(n_embeddings), (1, 1, n_embeddings)),
(batch_size, padded_size, 1))
one_hot = tf.where(
tf.equal(emb_idcs, col_idcs), emb_weights,
tf.zeros((batch_size, padded_size, n_embeddings)))
return tf.tensordot(one_hot, embedding_table, 1)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test TPU optimized matmul embedding."""
import numpy as np
import tensorflow as tf
from official.r1.utils import tpu as tpu_utils
TEST_CASES = [
dict(embedding_dim=256, vocab_size=1000, sequence_length=64,
batch_size=32, seed=54131),
dict(embedding_dim=8, vocab_size=15, sequence_length=12,
batch_size=256, seed=536413),
dict(embedding_dim=2048, vocab_size=512, sequence_length=50,
batch_size=8, seed=35124)
]
class TPUBaseTester(tf.test.TestCase):
def construct_embedding_and_values(self, embedding_dim, vocab_size,
sequence_length, batch_size, seed):
np.random.seed(seed)
embeddings = np.random.random(size=(vocab_size, embedding_dim))
embedding_table = tf.convert_to_tensor(value=embeddings, dtype=tf.float32)
tokens = np.random.randint(low=1, high=vocab_size-1,
size=(batch_size, sequence_length))
for i in range(batch_size):
tokens[i, np.random.randint(low=0, high=sequence_length-1):] = 0
values = tf.convert_to_tensor(value=tokens, dtype=tf.int32)
mask = tf.cast(tf.not_equal(values, 0), dtype=tf.float32)
return embedding_table, values, mask
def _test_embedding(self, embedding_dim, vocab_size,
sequence_length, batch_size, seed):
"""Test that matmul embedding matches embedding lookup (gather)."""
with self.test_session():
embedding_table, values, mask = self.construct_embedding_and_values(
embedding_dim=embedding_dim,
vocab_size=vocab_size,
sequence_length=sequence_length,
batch_size=batch_size,
seed=seed
)
embedding = (tf.nn.embedding_lookup(params=embedding_table, ids=values) *
tf.expand_dims(mask, -1))
matmul_embedding = tpu_utils.embedding_matmul(
embedding_table=embedding_table, values=values, mask=mask)
self.assertAllClose(embedding, matmul_embedding)
def _test_masking(self, embedding_dim, vocab_size,
sequence_length, batch_size, seed):
"""Test that matmul embedding properly zeros masked positions."""
with self.test_session():
embedding_table, values, mask = self.construct_embedding_and_values(
embedding_dim=embedding_dim,
vocab_size=vocab_size,
sequence_length=sequence_length,
batch_size=batch_size,
seed=seed
)
matmul_embedding = tpu_utils.embedding_matmul(
embedding_table=embedding_table, values=values, mask=mask)
self.assertAllClose(matmul_embedding,
matmul_embedding * tf.expand_dims(mask, -1))
def test_embedding_0(self):
self._test_embedding(**TEST_CASES[0])
def test_embedding_1(self):
self._test_embedding(**TEST_CASES[1])
def test_embedding_2(self):
self._test_embedding(**TEST_CASES[2])
def test_masking_0(self):
self._test_masking(**TEST_CASES[0])
def test_masking_1(self):
self._test_masking(**TEST_CASES[1])
def test_masking_2(self):
self._test_masking(**TEST_CASES[2])
if __name__ == "__main__":
tf.test.main()
![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
# Predicting Income with the Census Income Dataset
The implementation is based on TensorFlow 1.x.
## Overview
The [Census Income Data Set](https://archive.ics.uci.edu/ml/datasets/Census+Income) contains over 48,000 samples with attributes including age, occupation, education, and income (a binary label, either `>50K` or `<=50K`). The dataset is split into roughly 32,000 training and 16,000 testing samples.
Here, we use the [wide and deep model](https://research.googleblog.com/2016/06/wide-deep-learning-better-together-with.html) to predict the income labels. The **wide model** is able to memorize feature interactions in data with a large number of features, but it is not able to generalize these learned interactions to new data. The **deep model** generalizes well but is unable to learn exceptions within the data. The **wide and deep model** combines the two and is able to generalize while learning exceptions.
For the purposes of this example code, the Census Income Data Set was chosen to allow the model to train in a reasonable amount of time. You'll notice that the deep model performs almost as well as the wide and deep model on this dataset. The wide and deep model truly shines on larger data sets with high-cardinality features, where each feature has millions/billions of unique possible values (which is the specialty of the wide model).
Finally, a key point. As a modeler and developer, think about how this dataset is used and the potential benefits and harm a model's predictions can cause. A model like this could reinforce societal biases and disparities. Is a feature relevant to the problem you want to solve, or will it introduce bias? For more information, read about [ML fairness](https://developers.google.com/machine-learning/fairness-overview/).
---
The code sample in this directory uses the high level `tf.estimator.Estimator` API. This API is great for fast iteration and quickly adapting models to your own datasets without major code overhauls. It allows you to move from single-worker training to distributed training, and it makes it easy to export model binaries for prediction.
The input function for the `Estimator` uses `tf.data.TextLineDataset`, which creates a `Dataset` object. The `Dataset` API makes it easy to apply transformations (map, batch, shuffle, etc.) to the data. [Read more here](https://www.tensorflow.org/guide/datasets).
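As a rough sketch (simplified from the `census_dataset.input_fn` implementation included later on this page; names mirror that file), the input pipeline chains these transformations:
```
dataset = tf.data.TextLineDataset(data_file)
dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])
dataset = dataset.map(parse_csv, num_parallel_calls=5)
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
```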
The `Estimator` and `Dataset` APIs are both highly encouraged for fast development and efficient training.
## Running the code
First make sure you've [added the models folder to your Python path](/official/#running-the-models); otherwise you may encounter an error like `ImportError: No module named official.wide_deep`.
### Setup
The [Census Income Data Set](https://archive.ics.uci.edu/ml/datasets/Census+Income) that this sample uses for training is hosted by the [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/). We have provided a script that downloads and cleans the necessary files.
```
python census_dataset.py
```
This will download the files to `/tmp/census_data`. To change the directory, set the `--data_dir` flag.
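For example (the path is illustrative):
```
python census_dataset.py --data_dir /path/to/census_data
```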
### Training
You can run the code locally as follows:
```
python census_main.py
```
The model is saved to `/tmp/census_model` by default, which can be changed using the `--model_dir` flag.
To run the *wide* or *deep*-only models, set the `--model_type` flag to `wide` or `deep`. Other flags are configurable as well; see `census_main.py` for details.
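For example, to train only the deep model and keep its checkpoints in a separate directory (the path is illustrative):
```
python census_main.py --model_type deep --model_dir /tmp/census_model_deep
```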
The final accuracy should be over 83% with any of the three model types.
You can also experiment with the `--inter` and `--intra` flags to explore inter/intra-op parallelism for potentially better performance, as follows:
```
python census_main.py --inter=<int> --intra=<int>
```
Please note that these optional inter/intra op flags do not affect model accuracy. They are TensorFlow framework configurations that only affect execution time.
For more details regarding the above inter/intra flags, please refer to [Optimizing_for_CPU](https://www.tensorflow.org/performance/performance_guide#optimizing_for_cpu) or [TensorFlow config.proto source code](https://github.com/tensorflow/tensorflow/blob/26b4dfa65d360f2793ad75083c797d57f8661b93/tensorflow/core/protobuf/config.proto#L165).
### TensorBoard
Run TensorBoard to inspect the details about the graph and training progression.
```
tensorboard --logdir=/tmp/census_model
```
## Inference with SavedModel
You can export the model into the TensorFlow [SavedModel](https://www.tensorflow.org/guide/saved_model) format by using the argument `--export_dir`:
```
python census_main.py --export_dir /tmp/wide_deep_saved_model
```
After the model finishes training, use [`saved_model_cli`](https://www.tensorflow.org/guide/saved_model#cli_to_inspect_and_execute_savedmodel) to inspect and execute the SavedModel.
Try the following commands to inspect the SavedModel:
**Replace `${TIMESTAMP}` with the folder produced (e.g. 1524249124)**
```
# List possible tag_sets. Only one metagraph is saved, so there will be one option.
saved_model_cli show --dir /tmp/wide_deep_saved_model/${TIMESTAMP}/
# Show SignatureDefs for tag_set=serve. SignatureDefs define the outputs to show.
saved_model_cli show --dir /tmp/wide_deep_saved_model/${TIMESTAMP}/ \
--tag_set serve --all
```
### Inference
Let's use the model to predict the income group of two examples:
```
saved_model_cli run --dir /tmp/wide_deep_saved_model/${TIMESTAMP}/ \
--tag_set serve --signature_def="predict" \
--input_examples='examples=[{"age":[46.], "education_num":[10.], "capital_gain":[7688.], "capital_loss":[0.], "hours_per_week":[38.]}, {"age":[24.], "education_num":[13.], "capital_gain":[0.], "capital_loss":[0.], "hours_per_week":[50.]}]'
```
This will print out the predicted classes and class probabilities. Class 0 is the `<=50K` group and class 1 is the `>50K` group.
## Additional Links
If you are interested in distributed training, take a look at [Distributed TensorFlow](https://www.tensorflow.org/deploy/distributed).
You can also [run this model on Cloud ML Engine](https://cloud.google.com/ml-engine/docs/getting-started-training-prediction), which provides [hyperparameter tuning](https://cloud.google.com/ml-engine/docs/getting-started-training-prediction#hyperparameter_tuning) to maximize your model's results and enables [deploying your model for prediction](https://cloud.google.com/ml-engine/docs/getting-started-training-prediction#deploy_a_model_to_support_prediction).
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and clean the Census Income Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# pylint: disable=wrong-import-order
from absl import app as absl_app
from absl import flags
from six.moves import urllib
from six.moves import zip
import tensorflow.compat.v1 as tf
# pylint: enable=wrong-import-order
from official.utils.flags import core as flags_core
DATA_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult'
TRAINING_FILE = 'adult.data'
TRAINING_URL = '%s/%s' % (DATA_URL, TRAINING_FILE)
EVAL_FILE = 'adult.test'
EVAL_URL = '%s/%s' % (DATA_URL, EVAL_FILE)
_CSV_COLUMNS = [
'age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
'income_bracket'
]
_CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],
[0], [0], [0], [''], ['']]
_HASH_BUCKET_SIZE = 1000
_NUM_EXAMPLES = {
'train': 32561,
'validation': 16281,
}
def _download_and_clean_file(filename, url):
"""Downloads data from url, and makes changes to match the CSV format."""
temp_file, _ = urllib.request.urlretrieve(url)
with tf.gfile.Open(temp_file, 'r') as temp_eval_file:
with tf.gfile.Open(filename, 'w') as eval_file:
for line in temp_eval_file:
line = line.strip()
line = line.replace(', ', ',')
if not line or ',' not in line:
continue
if line[-1] == '.':
line = line[:-1]
line += '\n'
eval_file.write(line)
tf.gfile.Remove(temp_file)
def download(data_dir):
"""Download census data if it is not already present."""
tf.gfile.MakeDirs(data_dir)
training_file_path = os.path.join(data_dir, TRAINING_FILE)
if not tf.gfile.Exists(training_file_path):
_download_and_clean_file(training_file_path, TRAINING_URL)
eval_file_path = os.path.join(data_dir, EVAL_FILE)
if not tf.gfile.Exists(eval_file_path):
_download_and_clean_file(eval_file_path, EVAL_URL)
def build_model_columns():
"""Builds a set of wide and deep feature columns."""
# Continuous variable columns
age = tf.feature_column.numeric_column('age')
education_num = tf.feature_column.numeric_column('education_num')
capital_gain = tf.feature_column.numeric_column('capital_gain')
capital_loss = tf.feature_column.numeric_column('capital_loss')
hours_per_week = tf.feature_column.numeric_column('hours_per_week')
education = tf.feature_column.categorical_column_with_vocabulary_list(
'education', [
'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',
'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',
'5th-6th', '10th', '1st-4th', 'Preschool', '12th'])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
'marital_status', [
'Married-civ-spouse', 'Divorced', 'Married-spouse-absent',
'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed'])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
'relationship', [
'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
'Other-relative'])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
'workclass', [
'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov',
'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked'])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
'occupation', hash_bucket_size=_HASH_BUCKET_SIZE)
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
education, marital_status, relationship, workclass, occupation,
age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
['education', 'occupation'], hash_bucket_size=_HASH_BUCKET_SIZE),
tf.feature_column.crossed_column(
[age_buckets, 'education', 'occupation'],
hash_bucket_size=_HASH_BUCKET_SIZE),
]
wide_columns = base_columns + crossed_columns
deep_columns = [
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(marital_status),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(occupation, dimension=8),
]
return wide_columns, deep_columns
def input_fn(data_file, num_epochs, shuffle, batch_size):
"""Generate an input function for the Estimator."""
assert tf.gfile.Exists(data_file), (
'%s not found. Please make sure you have run census_dataset.py and '
'set the --data_dir argument to the correct path.' % data_file)
def parse_csv(value):
tf.logging.info('Parsing {}'.format(data_file))
columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)
features = dict(list(zip(_CSV_COLUMNS, columns)))
labels = features.pop('income_bracket')
classes = tf.equal(labels, '>50K') # binary classification
return features, classes
# Extract lines from input files using the Dataset API.
dataset = tf.data.TextLineDataset(data_file)
if shuffle:
dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])
dataset = dataset.map(parse_csv, num_parallel_calls=5)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
return dataset
def define_data_download_flags():
"""Add flags specifying data download arguments."""
flags.DEFINE_string(
name="data_dir", default="/tmp/census_data/",
help=flags_core.help_wrap(
"Directory to download and extract data."))
def main(_):
download(flags.FLAGS.data_dir)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
define_data_download_flags()
absl_app.run(main)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train DNN on census income dataset."""
import os
from absl import app as absl_app
from absl import flags
import tensorflow.compat.v1 as tf
from official.r1.utils.logs import logger
from official.r1.wide_deep import census_dataset
from official.r1.wide_deep import wide_deep_run_loop
from official.utils.flags import core as flags_core
def define_census_flags():
wide_deep_run_loop.define_wide_deep_flags()
flags.adopt_module_key_flags(wide_deep_run_loop)
flags_core.set_defaults(data_dir='/tmp/census_data',
model_dir='/tmp/census_model',
train_epochs=40,
epochs_between_evals=2,
inter_op_parallelism_threads=0,
intra_op_parallelism_threads=0,
batch_size=40)
def build_estimator(model_dir, model_type, model_column_fn, inter_op, intra_op):
"""Build an estimator appropriate for the given model type."""
wide_columns, deep_columns = model_column_fn()
hidden_units = [100, 75, 50, 25]
# Create a tf.estimator.RunConfig to ensure the model is run on CPU, which
# trains faster than GPU for this model.
run_config = tf.estimator.RunConfig().replace(
session_config=tf.ConfigProto(device_count={'GPU': 0},
inter_op_parallelism_threads=inter_op,
intra_op_parallelism_threads=intra_op))
if model_type == 'wide':
return tf.estimator.LinearClassifier(
model_dir=model_dir,
feature_columns=wide_columns,
config=run_config)
elif model_type == 'deep':
return tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=hidden_units,
config=run_config)
else:
return tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=hidden_units,
config=run_config)
def run_census(flags_obj):
"""Construct all necessary functions and call run_loop.
Args:
flags_obj: Object containing user specified flags.
"""
if flags_obj.download_if_missing:
census_dataset.download(flags_obj.data_dir)
train_file = os.path.join(flags_obj.data_dir, census_dataset.TRAINING_FILE)
test_file = os.path.join(flags_obj.data_dir, census_dataset.EVAL_FILE)
# Train and evaluate the model every `flags.epochs_between_evals` epochs.
def train_input_fn():
return census_dataset.input_fn(
train_file, flags_obj.epochs_between_evals, True, flags_obj.batch_size)
def eval_input_fn():
return census_dataset.input_fn(test_file, 1, False, flags_obj.batch_size)
tensors_to_log = {
'average_loss': '{loss_prefix}head/truediv',
'loss': '{loss_prefix}head/weighted_loss/Sum'
}
wide_deep_run_loop.run_loop(
name="Census Income", train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
model_column_fn=census_dataset.build_model_columns,
build_estimator_fn=build_estimator,
flags_obj=flags_obj,
tensors_to_log=tensors_to_log,
early_stop=True)
def main(_):
with logger.benchmark_context(flags.FLAGS):
run_census(flags.FLAGS)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
define_census_flags()
absl_app.run(main)
39,State-gov,77516,Bachelors,13,Never-married,Adm-clerical,Not-in-family,,,2174,0,40,,<=50K
50,Self-emp-not-inc,83311,Bachelors,13,Married-civ-spouse,Exec-managerial,Husband,,,0,0,13,,<=50K
38,Private,215646,HS-grad,9,Divorced,Handlers-cleaners,Not-in-family,,,0,0,40,,<=50K
53,Private,234721,11th,7,Married-civ-spouse,Handlers-cleaners,Husband,,,0,0,40,,<=50K
28,Private,338409,Bachelors,13,Married-civ-spouse,Prof-specialty,Wife,,,0,0,40,,<=50K
37,Private,284582,Masters,14,Married-civ-spouse,Exec-managerial,Wife,,,0,0,40,,<=50K
49,Private,160187,9th,5,Married-spouse-absent,Other-service,Not-in-family,,,0,0,16,,<=50K
52,Self-emp-not-inc,209642,HS-grad,9,Married-civ-spouse,Exec-managerial,Husband,,,0,0,45,,>50K
31,Private,45781,Masters,14,Never-married,Prof-specialty,Not-in-family,,,14084,0,50,,>50K
42,Private,159449,Bachelors,13,Married-civ-spouse,Exec-managerial,Husband,,,5178,0,40,,>50K
37,Private,280464,Some-college,10,Married-civ-spouse,Exec-managerial,Husband,,,0,0,80,,>50K
30,State-gov,141297,Bachelors,13,Married-civ-spouse,Prof-specialty,Husband,,,0,0,40,,>50K
23,Private,122272,Bachelors,13,Never-married,Adm-clerical,Own-child,,,0,0,30,,<=50K
32,Private,205019,Assoc-acdm,12,Never-married,Sales,Not-in-family,,,0,0,50,,<=50K
40,Private,121772,Assoc-voc,11,Married-civ-spouse,Craft-repair,Husband,,,0,0,40,,>50K
34,Private,245487,7th-8th,4,Married-civ-spouse,Transport-moving,Husband,,,0,0,45,,<=50K
25,Self-emp-not-inc,176756,HS-grad,9,Never-married,Farming-fishing,Own-child,,,0,0,35,,<=50K
32,Private,186824,HS-grad,9,Never-married,Machine-op-inspct,Unmarried,,,0,0,40,,<=50K
38,Private,28887,11th,7,Married-civ-spouse,Sales,Husband,,,0,0,50,,<=50K
43,Self-emp-not-inc,292175,Masters,14,Divorced,Exec-managerial,Unmarried,,,0,0,45,,>50K
40,Private,193524,Doctorate,16,Married-civ-spouse,Prof-specialty,Husband,,,0,0,60,,>50K
56,Local-gov,216851,Bachelors,13,Married-civ-spouse,Tech-support,Husband,,,0,0,40,,>50K
54,?,180211,Some-college,10,Married-civ-spouse,?,Husband,,,0,0,60,,>50K
22,State-gov,311512,Some-college,10,Married-civ-spouse,Other-service,Husband,,,0,0,15,,<=50K
31,Private,84154,Some-college,10,Married-civ-spouse,Sales,Husband,,,0,0,38,,>50K
57,Federal-gov,337895,Bachelors,13,Married-civ-spouse,Prof-specialty,Husband,,,0,0,40,,>50K
47,Private,51835,Prof-school,15,Married-civ-spouse,Prof-specialty,Wife,,,0,1902,60,,>50K
50,Federal-gov,251585,Bachelors,13,Divorced,Exec-managerial,Not-in-family,,,0,0,55,,>50K
25,Private,289980,HS-grad,9,Never-married,Handlers-cleaners,Not-in-family,,,0,0,35,,<=50K
42,Private,116632,Doctorate,16,Married-civ-spouse,Prof-specialty,Husband,,,0,0,45,,>50K
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import tensorflow.compat.v1 as tf
from official.r1.wide_deep import census_dataset
from official.r1.wide_deep import census_main
from official.utils.testing import integration
logging.set_verbosity(logging.ERROR)
TEST_INPUT = ('18,Self-emp-not-inc,987,Bachelors,12,Married-civ-spouse,abc,'
'Husband,zyx,wvu,34,56,78,tsr,<=50K')
TEST_INPUT_VALUES = {
'age': 18,
'education_num': 12,
'capital_gain': 34,
'capital_loss': 56,
'hours_per_week': 78,
'education': 'Bachelors',
'marital_status': 'Married-civ-spouse',
'relationship': 'Husband',
'workclass': 'Self-emp-not-inc',
'occupation': 'abc',
}
TEST_CSV = os.path.join(os.path.dirname(__file__), 'census_test.csv')
class BaseTest(tf.test.TestCase):
"""Tests for Wide Deep model."""
@classmethod
def setUpClass(cls): # pylint: disable=invalid-name
super(BaseTest, cls).setUpClass()
census_main.define_census_flags()
def setUp(self):
# Create temporary CSV file
self.temp_dir = self.get_temp_dir()
self.input_csv = os.path.join(self.temp_dir, 'test.csv')
with tf.io.gfile.GFile(self.input_csv, 'w') as temp_csv:
temp_csv.write(TEST_INPUT)
with tf.io.gfile.GFile(TEST_CSV, 'r') as temp_csv:
test_csv_contents = temp_csv.read()
# Used for end-to-end tests.
for fname in [census_dataset.TRAINING_FILE, census_dataset.EVAL_FILE]:
with tf.io.gfile.GFile(
os.path.join(self.temp_dir, fname), 'w') as test_csv:
test_csv.write(test_csv_contents)
def test_input_fn(self):
dataset = census_dataset.input_fn(self.input_csv, 1, False, 1)
features, labels = dataset.make_one_shot_iterator().get_next()
with self.test_session() as sess:
features, labels = sess.run((features, labels))
# Compare the two features dictionaries.
for key in TEST_INPUT_VALUES:
self.assertTrue(key in features)
self.assertEqual(len(features[key]), 1)
feature_value = features[key][0]
# Convert from bytes to string for Python 3.
if isinstance(feature_value, bytes):
feature_value = feature_value.decode()
self.assertEqual(TEST_INPUT_VALUES[key], feature_value)
self.assertFalse(labels)
def build_and_test_estimator(self, model_type):
"""Ensure that model trains and minimizes loss."""
model = census_main.build_estimator(
self.temp_dir, model_type,
model_column_fn=census_dataset.build_model_columns,
inter_op=0, intra_op=0)
# Train for 1 step to initialize model and evaluate initial loss
def get_input_fn(num_epochs, shuffle, batch_size):
def input_fn():
return census_dataset.input_fn(
TEST_CSV, num_epochs=num_epochs, shuffle=shuffle,
batch_size=batch_size)
return input_fn
model.train(input_fn=get_input_fn(1, True, 1), steps=1)
initial_results = model.evaluate(input_fn=get_input_fn(1, False, 1))
# Train for 100 epochs at batch size 3 and evaluate final loss
model.train(input_fn=get_input_fn(100, True, 3))
final_results = model.evaluate(input_fn=get_input_fn(1, False, 1))
print('%s initial results:' % model_type, initial_results)
print('%s final results:' % model_type, final_results)
# Ensure loss has decreased, while accuracy and both AUCs have increased.
self.assertLess(final_results['loss'], initial_results['loss'])
self.assertGreater(final_results['auc'], initial_results['auc'])
self.assertGreater(final_results['auc_precision_recall'],
initial_results['auc_precision_recall'])
self.assertGreater(final_results['accuracy'], initial_results['accuracy'])
def test_wide_deep_estimator_training(self):
self.build_and_test_estimator('wide_deep')
def test_end_to_end_wide(self):
integration.run_synthetic(
main=census_main.main, tmp_root=self.get_temp_dir(),
extra_flags=[
'--data_dir', self.get_temp_dir(),
'--model_type', 'wide',
'--download_if_missing=false'
],
synth=False)
def test_end_to_end_deep(self):
integration.run_synthetic(
main=census_main.main, tmp_root=self.get_temp_dir(),
extra_flags=[
'--data_dir', self.get_temp_dir(),
'--model_type', 'deep',
'--download_if_missing=false'
],
synth=False)
def test_end_to_end_wide_deep(self):
integration.run_synthetic(
main=census_main.main, tmp_root=self.get_temp_dir(),
extra_flags=[
'--data_dir', self.get_temp_dir(),
'--model_type', 'wide_deep',
'--download_if_missing=false'
],
synth=False)
if __name__ == '__main__':
tf.disable_eager_execution()
tf.test.main()
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prepare MovieLens dataset for wide-deep."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
# pylint: disable=wrong-import-order
from absl import app as absl_app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
# pylint: enable=wrong-import-order
from official.recommendation import movielens
from official.r1.utils.data import file_io
from official.utils.flags import core as flags_core
_BUFFER_SUBDIR = "wide_deep_buffer"
_FEATURE_MAP = {
movielens.USER_COLUMN: tf.compat.v1.FixedLenFeature([1], dtype=tf.int64),
movielens.ITEM_COLUMN: tf.compat.v1.FixedLenFeature([1], dtype=tf.int64),
movielens.TIMESTAMP_COLUMN: tf.compat.v1.FixedLenFeature([1],
dtype=tf.int64),
movielens.GENRE_COLUMN: tf.compat.v1.FixedLenFeature(
[movielens.N_GENRE], dtype=tf.int64),
movielens.RATING_COLUMN: tf.compat.v1.FixedLenFeature([1],
dtype=tf.float32),
}
_BUFFER_SIZE = {
movielens.ML_1M: {"train": 107978119, "eval": 26994538},
movielens.ML_20M: {"train": 2175203810, "eval": 543802008}
}
_USER_EMBEDDING_DIM = 16
_ITEM_EMBEDDING_DIM = 64
def build_model_columns(dataset):
"""Builds a set of wide and deep feature columns."""
user_id = tf.feature_column.categorical_column_with_vocabulary_list(
movielens.USER_COLUMN, range(1, movielens.NUM_USER_IDS[dataset]))
user_embedding = tf.feature_column.embedding_column(
user_id, _USER_EMBEDDING_DIM, max_norm=np.sqrt(_USER_EMBEDDING_DIM))
item_id = tf.feature_column.categorical_column_with_vocabulary_list(
movielens.ITEM_COLUMN, range(1, movielens.NUM_ITEM_IDS))
item_embedding = tf.feature_column.embedding_column(
item_id, _ITEM_EMBEDDING_DIM, max_norm=np.sqrt(_ITEM_EMBEDDING_DIM))
time = tf.feature_column.numeric_column(movielens.TIMESTAMP_COLUMN)
genres = tf.feature_column.numeric_column(
movielens.GENRE_COLUMN, shape=(movielens.N_GENRE,), dtype=tf.uint8)
deep_columns = [user_embedding, item_embedding, time, genres]
wide_columns = []
return wide_columns, deep_columns
def _deserialize(examples_serialized):
features = tf.parse_example(examples_serialized, _FEATURE_MAP)
return features, features[movielens.RATING_COLUMN] / movielens.MAX_RATING
def _buffer_path(data_dir, dataset, name):
return os.path.join(data_dir, _BUFFER_SUBDIR,
"{}_{}_buffer".format(dataset, name))
def _df_to_input_fn(df, name, dataset, data_dir, batch_size, repeat, shuffle):
"""Serialize a dataframe and write it to a buffer file."""
buffer_path = _buffer_path(data_dir, dataset, name)
expected_size = _BUFFER_SIZE[dataset].get(name)
file_io.write_to_buffer(
dataframe=df, buffer_path=buffer_path,
columns=list(_FEATURE_MAP.keys()), expected_size=expected_size)
def input_fn():
dataset = tf.data.TFRecordDataset(buffer_path)
# batch comes before map because map can deserialize multiple examples.
dataset = dataset.batch(batch_size)
dataset = dataset.map(_deserialize, num_parallel_calls=16)
if shuffle:
dataset = dataset.shuffle(shuffle)
dataset = dataset.repeat(repeat)
return dataset.prefetch(1)
return input_fn
def _check_buffers(data_dir, dataset):
train_path = os.path.join(data_dir, _BUFFER_SUBDIR,
"{}_{}_buffer".format(dataset, "train"))
eval_path = os.path.join(data_dir, _BUFFER_SUBDIR,
"{}_{}_buffer".format(dataset, "eval"))
if not tf.gfile.Exists(train_path) or not tf.gfile.Exists(eval_path):
return False
return all([
tf.gfile.Stat(_buffer_path(data_dir, dataset, "train")).length ==
_BUFFER_SIZE[dataset]["train"],
tf.gfile.Stat(_buffer_path(data_dir, dataset, "eval")).length ==
_BUFFER_SIZE[dataset]["eval"],
])
def construct_input_fns(dataset, data_dir, batch_size=16, repeat=1):
"""Construct train and test input functions, as well as the column fn."""
if _check_buffers(data_dir, dataset):
train_df, eval_df = None, None
else:
df = movielens.csv_to_joint_dataframe(dataset=dataset, data_dir=data_dir)
df = movielens.integerize_genres(dataframe=df)
df = df.drop(columns=[movielens.TITLE_COLUMN])
train_df = df.sample(frac=0.8, random_state=0)
eval_df = df.drop(train_df.index)
train_df = train_df.reset_index(drop=True)
eval_df = eval_df.reset_index(drop=True)
train_input_fn = _df_to_input_fn(
df=train_df, name="train", dataset=dataset, data_dir=data_dir,
batch_size=batch_size, repeat=repeat,
shuffle=movielens.NUM_RATINGS[dataset])
eval_input_fn = _df_to_input_fn(
df=eval_df, name="eval", dataset=dataset, data_dir=data_dir,
batch_size=batch_size, repeat=repeat, shuffle=None)
model_column_fn = functools.partial(build_model_columns, dataset=dataset)
train_input_fn()
return train_input_fn, eval_input_fn, model_column_fn
def main(_):
movielens.download(dataset=flags.FLAGS.dataset, data_dir=flags.FLAGS.data_dir)
construct_input_fns(flags.FLAGS.dataset, flags.FLAGS.data_dir)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
movielens.define_data_download_flags()
flags.adopt_module_key_flags(movielens)
flags_core.set_defaults(dataset="ml-1m")
absl_app.run(main)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train DNN on Kaggle movie dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app as absl_app
from absl import flags
import tensorflow.compat.v1 as tf
from official.r1.utils.logs import logger
from official.r1.wide_deep import movielens_dataset
from official.r1.wide_deep import wide_deep_run_loop
from official.recommendation import movielens
from official.utils.flags import core as flags_core
def define_movie_flags():
"""Define flags for movie dataset training."""
wide_deep_run_loop.define_wide_deep_flags()
flags.DEFINE_enum(
name="dataset", default=movielens.ML_1M,
enum_values=movielens.DATASETS, case_sensitive=False,
help=flags_core.help_wrap("Dataset to be trained and evaluated."))
flags.adopt_module_key_flags(wide_deep_run_loop)
flags_core.set_defaults(data_dir="/tmp/movielens-data/",
model_dir='/tmp/movie_model',
model_type="deep",
train_epochs=50,
epochs_between_evals=5,
inter_op_parallelism_threads=0,
intra_op_parallelism_threads=0,
batch_size=256)
@flags.validator("stop_threshold",
message="stop_threshold not supported for movielens model")
def _no_stop(stop_threshold):
return stop_threshold is None
def build_estimator(model_dir, model_type, model_column_fn, inter_op, intra_op):
"""Build an estimator appropriate for the given model type."""
if model_type != "deep":
raise NotImplementedError("movie dataset only supports `deep` model_type")
_, deep_columns = model_column_fn()
hidden_units = [256, 256, 256, 128]
run_config = tf.estimator.RunConfig().replace(
session_config=tf.ConfigProto(device_count={'GPU': 0},
inter_op_parallelism_threads=inter_op,
intra_op_parallelism_threads=intra_op))
return tf.estimator.DNNRegressor(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=hidden_units,
optimizer=tf.compat.v1.train.AdamOptimizer(),
activation_fn=tf.nn.sigmoid,
dropout=0.3,
loss_reduction=tf.losses.Reduction.MEAN)
def run_movie(flags_obj):
"""Construct all necessary functions and call run_loop.
Args:
flags_obj: Object containing user specified flags.
"""
if flags_obj.download_if_missing:
movielens.download(dataset=flags_obj.dataset, data_dir=flags_obj.data_dir)
train_input_fn, eval_input_fn, model_column_fn = \
movielens_dataset.construct_input_fns(
dataset=flags_obj.dataset, data_dir=flags_obj.data_dir,
batch_size=flags_obj.batch_size, repeat=flags_obj.epochs_between_evals)
tensors_to_log = {
'loss': '{loss_prefix}head/weighted_loss/value'
}
wide_deep_run_loop.run_loop(
name="MovieLens", train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
model_column_fn=model_column_fn,
build_estimator_fn=build_estimator,
flags_obj=flags_obj,
tensors_to_log=tensors_to_log,
early_stop=False)
def main(_):
with logger.benchmark_context(flags.FLAGS):
run_movie(flags.FLAGS)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
define_movie_flags()
absl_app.run(main)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core run logic for TensorFlow Wide & Deep Tutorial using tf.estimator API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from absl import app as absl_app
from absl import flags
import tensorflow.compat.v1 as tf
from official.r1.utils.logs import hooks_helper
from official.r1.utils.logs import logger
from official.utils.flags import core as flags_core
from official.utils.misc import model_helpers
LOSS_PREFIX = {'wide': 'linear/', 'deep': 'dnn/'}
def define_wide_deep_flags():
"""Add supervised learning flags, as well as wide-deep model type."""
flags_core.define_base(clean=True, train_epochs=True,
epochs_between_evals=True, stop_threshold=True,
hooks=True, export_dir=True)
flags_core.define_benchmark()
flags_core.define_performance(
num_parallel_calls=False, inter_op=True, intra_op=True,
synthetic_data=False, max_train_steps=False, dtype=False,
all_reduce_alg=False)
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_enum(
name="model_type", short_name="mt", default="wide_deep",
enum_values=['wide', 'deep', 'wide_deep'],
help="Select model topology.")
flags.DEFINE_boolean(
name="download_if_missing", default=True, help=flags_core.help_wrap(
"Download data to data_dir if it is not already present."))
def export_model(model, model_type, export_dir, model_column_fn):
"""Export to SavedModel format.
Args:
model: Estimator object
model_type: string indicating model type. "wide", "deep" or "wide_deep"
export_dir: directory to export the model.
model_column_fn: Function to generate model feature columns.
"""
wide_columns, deep_columns = model_column_fn()
if model_type == 'wide':
columns = wide_columns
elif model_type == 'deep':
columns = deep_columns
else:
columns = wide_columns + deep_columns
feature_spec = tf.feature_column.make_parse_example_spec(columns)
example_input_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))
model.export_savedmodel(export_dir, example_input_fn,
strip_default_attrs=True)
def run_loop(name, train_input_fn, eval_input_fn, model_column_fn,
build_estimator_fn, flags_obj, tensors_to_log, early_stop=False):
"""Define training loop."""
model_helpers.apply_clean(flags.FLAGS)
model = build_estimator_fn(
model_dir=flags_obj.model_dir, model_type=flags_obj.model_type,
model_column_fn=model_column_fn,
inter_op=flags_obj.inter_op_parallelism_threads,
intra_op=flags_obj.intra_op_parallelism_threads)
run_params = {
'batch_size': flags_obj.batch_size,
'train_epochs': flags_obj.train_epochs,
'model_type': flags_obj.model_type,
}
benchmark_logger = logger.get_benchmark_logger()
benchmark_logger.log_run_info('wide_deep', name, run_params,
test_id=flags_obj.benchmark_test_id)
loss_prefix = LOSS_PREFIX.get(flags_obj.model_type, '')
tensors_to_log = {k: v.format(loss_prefix=loss_prefix)
for k, v in tensors_to_log.items()}
train_hooks = hooks_helper.get_train_hooks(
flags_obj.hooks, model_dir=flags_obj.model_dir,
batch_size=flags_obj.batch_size, tensors_to_log=tensors_to_log)
# Train and evaluate the model every `flags.epochs_between_evals` epochs.
for n in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):
model.train(input_fn=train_input_fn, hooks=train_hooks)
results = model.evaluate(input_fn=eval_input_fn)
# Display evaluation metrics
tf.logging.info('Results at epoch %d / %d',
(n + 1) * flags_obj.epochs_between_evals,
flags_obj.train_epochs)
tf.logging.info('-' * 60)
for key in sorted(results):
tf.logging.info('%s: %s' % (key, results[key]))
benchmark_logger.log_evaluation_result(results)
if early_stop and model_helpers.past_stop_threshold(
flags_obj.stop_threshold, results['accuracy']):
break
# Export the model
if flags_obj.export_dir is not None:
export_model(model, flags_obj.model_type, flags_obj.export_dir,
model_column_fn)
......@@ -16,10 +16,13 @@ dataclasses
gin-config
tf_slim>=1.1.0
typing
sentencepiece
Cython
matplotlib
opencv-python-headless
pyyaml
# CV related dependencies
opencv-python-headless
Pillow
-e git+https://github.com/cocodataset/cocoapi#egg=pycocotools&subdirectory=PythonAPI
# NLP related dependencies
seqeval
sentencepiece
......@@ -41,12 +41,13 @@ class BatchTimestamp(object):
class TimeHistory(tf.keras.callbacks.Callback):
"""Callback for Keras models."""
def __init__(self, batch_size, log_steps, logdir=None):
def __init__(self, batch_size, log_steps, initial_step=0, logdir=None):
"""Callback for logging performance.
Args:
batch_size: Total batch size.
log_steps: Interval of steps between logging of batch level stats.
initial_step: Optional, initial step.
logdir: Optional directory to write TensorBoard summaries.
"""
# TODO(wcromar): remove this parameter and rely on `logs` parameter of
......@@ -54,8 +55,8 @@ class TimeHistory(tf.keras.callbacks.Callback):
self.batch_size = batch_size
super(TimeHistory, self).__init__()
self.log_steps = log_steps
self.last_log_step = 0
self.steps_before_epoch = 0
self.last_log_step = initial_step
self.steps_before_epoch = initial_step
self.steps_in_epoch = 0
self.start_time = None
......
......@@ -48,6 +48,22 @@ so the checkpoints are not compatible.
We will unify the implementation soon.
### Train a SpineNet-49 based RetinaNet.
```bash
TPU_NAME="<your GCP TPU name>"
MODEL_DIR="<path to the directory to store model files>"
TRAIN_FILE_PATTERN="<path to the TFRecord training data>"
EVAL_FILE_PATTERN="<path to the TFRecord validation data>"
VAL_JSON_FILE="<path to the validation annotation JSON file>"
python3 ~/models/official/vision/detection/main.py \
--strategy_type=tpu \
--tpu="${TPU_NAME?}" \
--model_dir="${MODEL_DIR?}" \
--mode=train \
--params_override="{ type: retinanet, architecture: {backbone: spinenet, multilevel_features: identity}, spinenet: {model_id: 49}, train: { train_file_pattern: ${TRAIN_FILE_PATTERN?} }, eval: { val_json_file: ${VAL_JSON_FILE?}, eval_file_pattern: ${EVAL_FILE_PATTERN?} } }"
```
### Train a custom RetinaNet using the config file.
......@@ -123,8 +139,6 @@ predict:
predict_batch_size: 8
architecture:
use_bfloat16: False
retinanet_parser:
use_bfloat16: False
train:
total_steps: 1
batch_size: 8
......@@ -165,6 +179,24 @@ so the checkpoints are not compatible.
We will unify the implementation soon.
### Train a SpineNet-49 based Mask R-CNN.
```bash
TPU_NAME="<your GCP TPU name>"
MODEL_DIR="<path to the directory to store model files>"
TRAIN_FILE_PATTERN="<path to the TFRecord training data>"
EVAL_FILE_PATTERN="<path to the TFRecord validation data>"
VAL_JSON_FILE="<path to the validation annotation JSON file>"
python3 ~/models/official/vision/detection/main.py \
--strategy_type=tpu \
--tpu="${TPU_NAME?}" \
--model_dir="${MODEL_DIR?}" \
--mode=train \
--model=mask_rcnn \
--params_override="{architecture: {backbone: spinenet, multilevel_features: identity}, spinenet: {model_id: 49}, train: { train_file_pattern: ${TRAIN_FILE_PATTERN?} }, eval: { val_json_file: ${VAL_JSON_FILE?}, eval_file_pattern: ${EVAL_FILE_PATTERN?} } }"
```
### Train a custom Mask R-CNN using the config file.
First, create a YAML config file, e.g. *my_maskrcnn.yaml*.
......@@ -245,8 +277,6 @@ predict:
predict_batch_size: 8
architecture:
use_bfloat16: False
maskrcnn_parser:
use_bfloat16: False
train:
total_steps: 1000
batch_size: 8
......@@ -255,6 +285,140 @@ use_tpu: False
"
```
## Train ShapeMask on TPU
### Train a ResNet-50 based ShapeMask.
```bash
TPU_NAME="<your GCP TPU name>"
MODEL_DIR="<path to the directory to store model files>"
RESNET_CHECKPOINT="<path to the pre-trained Resnet-50 checkpoint>"
TRAIN_FILE_PATTERN="<path to the TFRecord training data>"
EVAL_FILE_PATTERN="<path to the TFRecord validation data>"
VAL_JSON_FILE="<path to the validation annotation JSON file>"
SHAPE_PRIOR_PATH="<path to shape priors>"
python3 ~/models/official/vision/detection/main.py \
--strategy_type=tpu \
--tpu=${TPU_NAME} \
--model_dir=${MODEL_DIR} \
--mode=train \
--model=shapemask \
--params_override="{train: { checkpoint: { path: ${RESNET_CHECKPOINT}, prefix: resnet50/ }, train_file_pattern: ${TRAIN_FILE_PATTERN} }, eval: { val_json_file: ${VAL_JSON_FILE}, eval_file_pattern: ${EVAL_FILE_PATTERN} }, shapemask_head: {use_category_for_mask: true, shape_prior_path: ${SHAPE_PRIOR_PATH}} }"
```
The pre-trained ResNet-50 checkpoint can be downloaded [here](https://storage.cloud.google.com/cloud-tpu-checkpoints/model-garden-vision/detection/resnet50-2018-02-07.tar.gz).
The shape priors can be downloaded [here](https://storage.googleapis.com/cloud-tpu-checkpoints/shapemask/kmeans_class_priors_91x20x32x32.npy).
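As a convenience, here is a minimal sketch of fetching both files with `gsutil`, assuming the `gs://cloud-tpu-checkpoints/...` paths mirror the HTTPS links above and that `gsutil` is installed:

```bash
# Assumed gs:// equivalents of the HTTPS links above; adjust the paths if they differ.
CHECKPOINT_DIR="/tmp/resnet50_checkpoint"
PRIOR_DIR="/tmp/shape_priors"
mkdir -p "${CHECKPOINT_DIR}" "${PRIOR_DIR}"

# Pre-trained ResNet-50 checkpoint (tarball), extracted in place.
gsutil cp gs://cloud-tpu-checkpoints/model-garden-vision/detection/resnet50-2018-02-07.tar.gz "${CHECKPOINT_DIR}/"
tar -xzf "${CHECKPOINT_DIR}/resnet50-2018-02-07.tar.gz" -C "${CHECKPOINT_DIR}"

# Class shape priors consumed by the ShapeMask head.
gsutil cp gs://cloud-tpu-checkpoints/shapemask/kmeans_class_priors_91x20x32x32.npy "${PRIOR_DIR}/"
```

The extracted checkpoint directory can then be passed as `RESNET_CHECKPOINT` and the `.npy` file as `SHAPE_PRIOR_PATH` in the training command above.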
### Train a custom ShapeMask using the config file.
First, create a YAML config file, e.g. *my_shapemask.yaml*.
This file specifies the parameters to be overridden:
```YAML
# my_shapemask.yaml
train:
train_file_pattern: <path to the TFRecord training data>
total_steps: <total steps to train>
batch_size: <training batch size>
eval:
eval_file_pattern: <path to the TFRecord validation data>
val_json_file: <path to the validation annotation JSON file>
batch_size: <evaluation batch size>
shapemask_head:
shape_prior_path: <path to shape priors>
```
Once the YAML config file is created, you can launch the training using the
following command.
```bash
TPU_NAME="<your GCP TPU name>"
MODEL_DIR="<path to the directory to store model files>"
python3 ~/models/official/vision/detection/main.py \
--strategy_type=tpu \
--tpu=${TPU_NAME} \
--model_dir=${MODEL_DIR} \
--mode=train \
--model=shapemask \
--config_file="my_shapemask.yaml"
```
## Train ShapeMask on GPU
Training on GPU is similar to training on TPU. The major change is the strategy type
(use
"[mirrored](https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy)"
for multiple GPUs and
"[one_device](https://www.tensorflow.org/api_docs/python/tf/distribute/OneDeviceStrategy)"
for a single GPU).
Multi-GPU example (assuming there are 8 GPUs connected to the host):
```bash
MODEL_DIR="<path to the directory to store model files>"
python3 ~/models/official/vision/detection/main.py \
--strategy_type=mirrored \
--num_gpus=8 \
--model_dir=${MODEL_DIR} \
--mode=train \
--model=shapemask \
--config_file="my_shapemask.yaml"
```
A single-GPU example:
```bash
MODEL_DIR="<path to the directory to store model files>"
python3 ~/models/official/vision/detection/main.py \
--strategy_type=one_device \
--num_gpus=1 \
--model_dir=${MODEL_DIR} \
--mode=train \
--model=shapemask \
--config_file="my_shapemask.yaml"
```
An example with inline configuration (YAML or JSON format):
```
python3 ~/models/official/vision/detection/main.py \
--model_dir=<model folder> \
--strategy_type=one_device \
--num_gpus=1 \
--mode=train \
--model=shapemask \
--params_override="eval:
eval_file_pattern: <Eval TFRecord file pattern>
batch_size: 8
val_json_file: <COCO format groundtruth JSON file>
train:
total_steps: 1000
batch_size: 8
train_file_pattern: <Train TFRecord file pattern>
use_tpu: False
"
```
### Run the evaluation (after training)
```
python3 /usr/share/models/official/vision/detection/main.py \
--strategy_type=tpu \
--tpu=${TPU_NAME} \
--model_dir=${MODEL_DIR} \
--mode=eval \
--model=shapemask \
--params_override="{eval: { val_json_file: ${VAL_JSON_FILE}, eval_file_pattern: ${EVAL_FILE_PATTERN}, eval_samples: 5000 } }"
```
`MODEL_DIR` needs to point to the directory containing the trained ShapeMask model.
Change `strategy_type=mirrored` and `num_gpus=1` to run on a GPU instead, as sketched below.
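For example, a GPU variant of the evaluation command might look like the following. This is only a sketch: it assumes the same `MODEL_DIR`, `VAL_JSON_FILE`, and `EVAL_FILE_PATTERN` variables used above and a single attached GPU.

```bash
MODEL_DIR="<path to the trained ShapeMask model>"
python3 /usr/share/models/official/vision/detection/main.py \
--strategy_type=mirrored \
--num_gpus=1 \
--model_dir=${MODEL_DIR} \
--mode=eval \
--model=shapemask \
--params_override="{eval: { val_json_file: ${VAL_JSON_FILE}, eval_file_pattern: ${EVAL_FILE_PATTERN}, eval_samples: 5000 } }"
```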
Note: the JSON groundtruth file is used for the [COCO dataset](http://cocodataset.org/#home) and can be
downloaded from the [COCO website](http://cocodataset.org/#download). For a custom dataset, it is unnecessary because the groundtruth can be included in the TFRecord files.
......
......@@ -17,10 +17,12 @@
BACKBONES = [
'resnet',
'spinenet',
]
MULTILEVEL_FEATURES = [
'fpn',
'identity',
]
# pylint: disable=line-too-long
......@@ -118,6 +120,9 @@ BASE_CFG = {
'resnet': {
'resnet_depth': 50,
},
'spinenet': {
'model_id': '49',
},
'fpn': {
'fpn_feat_dims': 256,
'use_separable_conv': False,
......
......@@ -185,12 +185,12 @@ class Parser(object):
is_crowds = data['groundtruth_is_crowd']
# Skips annotations with `is_crowd` = True.
if self._skip_crowd_during_training and self._is_training:
num_groundtrtuhs = tf.shape(classes)[0]
with tf.control_dependencies([num_groundtrtuhs, is_crowds]):
num_groundtruths = tf.shape(classes)[0]
with tf.control_dependencies([num_groundtruths, is_crowds]):
indices = tf.cond(
tf.greater(tf.size(is_crowds), 0),
lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
lambda: tf.cast(tf.range(num_groundtrtuhs), tf.int64))
lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
classes = tf.gather(classes, indices)
boxes = tf.gather(boxes, indices)
if self._include_mask:
......
......@@ -19,25 +19,28 @@ from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import functools
import os
import pprint
# pylint: disable=g-bad-import-order
import tensorflow as tf
from absl import app
from absl import flags
from absl import logging
# pylint: enable=g-bad-import-order
from official.modeling.hyperparams import params_dict
from official.modeling.training import distributed_executor as executor
from official.utils import hyperparams_flags
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
from official.vision.detection.configs import factory as config_factory
from official.vision.detection.dataloader import input_reader
from official.vision.detection.dataloader import mode_keys as ModeKeys
from official.vision.detection.executor.detection_executor import DetectionDistributedExecutor
from official.vision.detection.modeling import factory as model_factory
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
hyperparams_flags.initialize_common_flags()
flags_core.define_log_steps()
......@@ -194,6 +197,20 @@ def run(callbacks=None):
'strategy_config': executor.strategy_flags_dict(),
},
is_strict=False)
# Make sure use_tpu and strategy_type are in sync.
params.use_tpu = (params.strategy_type == 'tpu')
if not params.use_tpu:
params.override({
'architecture': {
'use_bfloat16': False,
},
'norm_activation': {
'use_sync_bn': False,
},
}, is_strict=True)
params.validate()
params.lock()
pp = pprint.PrettyPrinter()
......