# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Logging utilities for benchmark.

For collecting local environment metrics like CPU and memory, certain Python
packages need to be installed. See README for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import contextlib
import datetime
import json
import multiprocessing
import numbers
import os
import threading
import uuid

from absl import flags
from absl import logging
from six.moves import _thread as thread
import tensorflow as tf
from tensorflow.python.client import device_lib

from official.utils.logs import cloud_lib

METRIC_LOG_FILE_NAME = "metric.log"
BENCHMARK_RUN_LOG_FILE_NAME = "benchmark_run.log"
_DATE_TIME_FORMAT_PATTERN = "%Y-%m-%dT%H:%M:%S.%fZ"
GCP_TEST_ENV = "GCP"
RUN_STATUS_SUCCESS = "success"
RUN_STATUS_FAILURE = "failure"
RUN_STATUS_RUNNING = "running"


FLAGS = flags.FLAGS

# Don't use this directly; use get_benchmark_logger() to access the logger.
_benchmark_logger = None
_logger_lock = threading.Lock()


def config_benchmark_logger(flag_obj=None):
  """Config the global benchmark logger."""
  with _logger_lock:
    global _benchmark_logger
    if not flag_obj:
      flag_obj = FLAGS

    if (not hasattr(flag_obj, "benchmark_logger_type") or
        flag_obj.benchmark_logger_type == "BaseBenchmarkLogger"):
      _benchmark_logger = BaseBenchmarkLogger()
    elif flag_obj.benchmark_logger_type == "BenchmarkFileLogger":
      _benchmark_logger = BenchmarkFileLogger(flag_obj.benchmark_log_dir)
    else:
      raise ValueError("Unrecognized benchmark_logger_type: %s"
                       % flag_obj.benchmark_logger_type)
  return _benchmark_logger


def get_benchmark_logger():
  """Return the global benchmark logger, configuring a default if needed."""
  if not _benchmark_logger:
    config_benchmark_logger()
  return _benchmark_logger


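# A minimal selection sketch (hypothetical values; assumes the benchmark
# flags, e.g. benchmark_logger_type and benchmark_log_dir, have been defined
# and parsed):
#
#   FLAGS.benchmark_logger_type = "BenchmarkFileLogger"
#   FLAGS.benchmark_log_dir = "/tmp/benchmark"
#   logger = get_benchmark_logger()
#   logger.log_metric("accuracy", 0.76, global_step=10000)
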
@contextlib.contextmanager
def benchmark_context(flag_obj):
  """Context of benchmark, which will update status of the run accordingly."""
  benchmark_logger = config_benchmark_logger(flag_obj)
  try:
    yield
    benchmark_logger.on_finish(RUN_STATUS_SUCCESS)
  except Exception:  # pylint: disable=broad-except
    # Catch all exceptions, update the run status to failure, and re-raise.
    benchmark_logger.on_finish(RUN_STATUS_FAILURE)
    raise

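# A minimal usage sketch (assumes absl.app drives main() and the benchmark
# flags have been defined; run_loop is a hypothetical entry point):
#
#   def main(_):
#     with benchmark_context(flags.FLAGS):
#       run_loop()
#
# A clean exit records "success"; any exception records "failure" and is
# re-raised.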

class BaseBenchmarkLogger(object):
  """Class to log the benchmark information to STDOUT."""

  def log_evaluation_result(self, eval_results):
    """Log the evaluation result.

    The evaluation result is a dictionary that contains metrics defined in
    model_fn. It also contains an entry for global_step, which holds the value
    of the global step when the evaluation was performed.

    Args:
      eval_results: dict, the result of the evaluation.
    """
    if not isinstance(eval_results, dict):
      logging.warning("eval_results should be a dictionary for logging. "
                      "Got %s", type(eval_results))
      return
    global_step = eval_results[tf.compat.v1.GraphKeys.GLOBAL_STEP]
    for key in sorted(eval_results):
      if key != tf.compat.v1.GraphKeys.GLOBAL_STEP:
        self.log_metric(key, eval_results[key], global_step=global_step)

  def log_metric(self, name, value, unit=None, global_step=None, extras=None):
    """Log the benchmark metric information to local file.

    Currently the logging is done in a synchronized way. This should be updated
    to log asynchronously.

    Args:
      name: string, the name of the metric to log.
      value: number, the value of the metric. The value will not be logged if it
        is not a number type.
      unit: string, the unit of the metric, e.g. "images per second".
      global_step: int, the global_step when the metric is logged.
      extras: map of string:string, the extra information about the metric.
    """
    metric = _process_metric_to_json(name, value, unit, global_step, extras)
    if metric:
      logging.info("Benchmark metric: %s", metric)

  def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
    """Collect the benchmark run information and log it to STDOUT."""
    logging.info(
        "Benchmark run: %s",
        _gather_run_info(model_name, dataset_name, run_params, test_id))

  def on_finish(self, status):
    pass

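# A short sketch of feeding the base logger (hypothetical values; assumes an
# Estimator-style eval_results dict keyed by metric names and "global_step"):
#
#   logger = BaseBenchmarkLogger()
#   logger.log_evaluation_result(
#       {"accuracy": 0.76, "loss": 0.98, "global_step": 10000})
#   # Logs "accuracy" and "loss" as metrics, each tagged with global_step.
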

class BenchmarkFileLogger(BaseBenchmarkLogger):
  """Class to log the benchmark information to local disk."""

  def __init__(self, logging_dir):
    super(BenchmarkFileLogger, self).__init__()
    self._logging_dir = logging_dir
    if not tf.io.gfile.isdir(self._logging_dir):
      tf.io.gfile.makedirs(self._logging_dir)
    self._metric_file_handler = tf.io.gfile.GFile(
        os.path.join(self._logging_dir, METRIC_LOG_FILE_NAME), "a")

  def log_metric(self, name, value, unit=None, global_step=None, extras=None):
    """Log the benchmark metric information to local file.

    Currently the logging is done in a synchronized way. This should be updated
    to log asynchronously.

    Args:
      name: string, the name of the metric to log.
      value: number, the value of the metric. The value will not be logged if it
        is not a number type.
      unit: string, the unit of the metric, e.g. "images per second".
      global_step: int, the global_step when the metric is logged.
      extras: map of string:string, the extra information about the metric.
    """
    metric = _process_metric_to_json(name, value, unit, global_step, extras)
    if metric:
      try:
        json.dump(metric, self._metric_file_handler)
        self._metric_file_handler.write("\n")
        self._metric_file_handler.flush()
      except (TypeError, ValueError) as e:
        logging.warning(
            "Failed to dump metric to log file: name %s, value %s, error %s",
            name, value, e)

  def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
    """Collect most of the TF runtime information for the local env.

    The schema of the run info follows official/benchmark/datastore/schema.

    Args:
      model_name: string, the name of the model.
      dataset_name: string, the name of the dataset for training and
        evaluation.
      run_params: dict, the dictionary of parameters for the run; it could
        include hyperparameters or other params that are important for the run.
      test_id: string, the unique name of the test run, built from the
        combination of key parameters, e.g. batch size, number of GPUs. It is
        hardware independent.
    """
    run_info = _gather_run_info(model_name, dataset_name, run_params, test_id)

    with tf.io.gfile.GFile(os.path.join(
        self._logging_dir, BENCHMARK_RUN_LOG_FILE_NAME), "w") as f:
      try:
        json.dump(run_info, f)
        f.write("\n")
      except (TypeError, ValueError) as e:
        logging.warning("Failed to dump benchmark run info to log file: %s", e)

  def on_finish(self, status):
    """Flush and close the metric log file when the run ends."""
    self._metric_file_handler.flush()
    self._metric_file_handler.close()


def _gather_run_info(model_name, dataset_name, run_params, test_id):
  """Collect the benchmark run information for the local environment."""
  run_info = {
      "model_name": model_name,
      "dataset": {"name": dataset_name},
      "machine_config": {},
      "test_id": test_id,
      "run_date": datetime.datetime.utcnow().strftime(
          _DATE_TIME_FORMAT_PATTERN)}
  _collect_tensorflow_info(run_info)
  _collect_tensorflow_environment_variables(run_info)
  _collect_run_params(run_info, run_params)
  _collect_cpu_info(run_info)
  _collect_memory_info(run_info)
  _collect_test_environment(run_info)
  return run_info


def _process_metric_to_json(
    name, value, unit=None, global_step=None, extras=None):
  """Validate the metric data and generate JSON for insert."""
  if not isinstance(value, numbers.Number):
    logging.warning("Metric value to log should be a number. Got %s",
                    type(value))
    return None

  extras = _convert_to_json_dict(extras)
  return {
      "name": name,
      "value": float(value),
      "unit": unit,
      "global_step": global_step,
      "timestamp": datetime.datetime.utcnow().strftime(
          _DATE_TIME_FORMAT_PATTERN),
      "extras": extras}

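# For reference, a metric dict produced here serializes to a JSON line like
# the following (values are illustrative):
#
#   {"name": "accuracy", "value": 0.76, "unit": null, "global_step": 10000,
#    "timestamp": "2018-04-20T21:12:31.425171Z", "extras": []}
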

def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.version.VERSION, "git_hash": tf.version.GIT_VERSION}


def _collect_run_params(run_info, run_params):
  """Log the parameter information for the benchmark run."""
  def process_param(name, value):
    type_check = {
        str: {"name": name, "string_value": value},
        int: {"name": name, "long_value": value},
        bool: {"name": name, "bool_value": str(value)},
        float: {"name": name, "float_value": value},
    }
    return type_check.get(type(value),
                          {"name": name, "string_value": str(value)})
  if run_params:
    run_info["run_parameters"] = [
        process_param(k, v) for k, v in sorted(run_params.items())]

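# For example, hypothetical run_params {"batch_size": 32, "use_synthetic":
# False} would be recorded as:
#
#   [{"name": "batch_size", "long_value": 32},
#    {"name": "use_synthetic", "bool_value": "False"}]
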

def _collect_tensorflow_environment_variables(run_info):
  run_info["tensorflow_environment_variables"] = [
      {"name": k, "value": v}
      for k, v in sorted(os.environ.items()) if k.startswith("TF_")]


# The following code is mirrored from tensorflow/tools/test/system_info_lib
# which is not exposed for import.
def _collect_cpu_info(run_info):
  """Collect the CPU information for the local environment."""
  cpu_info = {}

  cpu_info["num_cores"] = multiprocessing.cpu_count()

  try:
    # Note: cpuinfo is not installed in the TensorFlow OSS tree.
    # It is installable via pip.
    import cpuinfo    # pylint: disable=g-import-not-at-top

    info = cpuinfo.get_cpu_info()
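    # NOTE: the keys below assume an older py-cpuinfo release; newer releases
    # renamed "brand" to "brand_raw" and reworked "hz_advertised_raw", so this
    # may need updating if cpuinfo is upgraded.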
    cpu_info["cpu_info"] = info["brand"]
    cpu_info["mhz_per_cpu"] = info["hz_advertised_raw"][0] / 1.0e6

    run_info["machine_config"]["cpu_info"] = cpu_info
  except ImportError:
    logging.warning("'cpuinfo' not imported. CPU info will not be logged.")


def _collect_memory_info(run_info):
  """Collect the memory information for the local environment."""
  try:
    # Note: psutil is not installed in the TensorFlow OSS tree.
    # It is installable via pip.
    import psutil   # pylint: disable=g-import-not-at-top
    vmem = psutil.virtual_memory()
    run_info["machine_config"]["memory_total"] = vmem.total
    run_info["machine_config"]["memory_available"] = vmem.available
  except ImportError:
    logging.warning("'psutil' not imported. Memory info will not be logged.")


def _collect_test_environment(run_info):
  """Detect the local environment, eg GCE, AWS or DGX, etc."""
  if cloud_lib.on_gcp():
    run_info["test_environment"] = GCP_TEST_ENV
  # TODO(scottzhu): Add more testing env detection for other platforms


def _parse_gpu_model(physical_device_desc):
  # Assume all the GPUs connected are the same model.
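  # For example, a physical_device_desc such as
  # "device: 0, name: Tesla K80, pci bus id: 0000:00:04.0" yields "Tesla K80".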
  for kv in physical_device_desc.split(","):
    k, _, v = kv.partition(":")
    if k.strip() == "name":
      return v.strip()
  return None


def _convert_to_json_dict(input_dict):
  if input_dict:
    return [{"name": k, "value": v} for k, v in sorted(input_dict.items())]
  else:
    return []