# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main entry to train and evaluate DeepSpeech model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
# pylint: disable=g-bad-import-order
from absl import app as absl_app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order

import data.dataset as dataset
import decoder
import deep_speech_model
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers

# Default vocabulary file
_VOCABULARY_FILE = os.path.join(
    os.path.dirname(__file__), "data/vocabulary.txt")
# Evaluation metrics
_WER_KEY = "WER"
_CER_KEY = "CER"


def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length):
  """Computes the time_steps/ctc_input_length after convolution.

  Suppose that the original feature contains two parts:
  1) Real spectrogram signals, spanning input_length steps.
  2) Padded part with all 0s.
  The total length of those two parts is denoted as max_time_steps, which is
  the padded length of the current batch. After convolution layers, the time
  steps of a spectrogram feature are reduced. Since we know what fraction of
  the padded length the real signal occupies, the time steps of the signal
  after convolution (denoted ctc_input_length) can be computed as:
  ctc_input_length = (input_length / max_time_steps) * ctc_time_steps.
  This length is then fed into the CTC loss function to compute the loss.
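  For example, with illustrative values max_time_steps=200, ctc_time_steps=50
  and input_length=160, ctc_input_length = floor(160 * 50 / 200) = 40.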

  Args:
    max_time_steps: max_time_steps for the batch, after padding.
    ctc_time_steps: number of timesteps after convolution.
    input_length: actual length of the original spectrogram, without padding.

  Returns:
    the ctc_input_length after convolution layer.
  """
  ctc_input_length = tf.to_float(tf.multiply(
      input_length, ctc_time_steps))
  return tf.to_int32(tf.floordiv(
      ctc_input_length, tf.to_float(max_time_steps)))


def ctc_loss(label_length, ctc_input_length, labels, logits):
  """Computes the ctc loss for the current batch of predictions."""
  label_length = tf.to_int32(tf.squeeze(label_length))
  ctc_input_length = tf.to_int32(tf.squeeze(ctc_input_length))
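  # Convert dense labels to the SparseTensor form required by tf.nn.ctc_loss.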
  sparse_labels = tf.to_int32(
      tf.keras.backend.ctc_label_dense_to_sparse(labels, label_length))
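  # Transpose to the time-major layout expected by tf.nn.ctc_loss; the epsilon
  # avoids taking the log of zero.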
  y_pred = tf.log(tf.transpose(
      logits, perm=[1, 0, 2]) + tf.keras.backend.epsilon())

  return tf.expand_dims(
      tf.nn.ctc_loss(labels=sparse_labels, inputs=y_pred,
                     sequence_length=ctc_input_length),
      axis=1)


def evaluate_model(estimator, speech_labels, entries, input_fn_eval):
  """Evaluate the model performance using WER and CER as metrics.

  WER: Word Error Rate
  CER: Character Error Rate
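
  Both are computed as edit distance normalized by the reference length
  (number of words for WER, number of characters for CER), averaged over
  the evaluation set.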

  Args:
    estimator: estimator to evaluate.
    speech_labels: a string specifying all the characters in the vocabulary.
    entries: a list of data entries (audio_file, file_size, transcript) for the
      given dataset.
    input_fn_eval: data input function for evaluation.

  Returns:
    Evaluation result containing 'WER' and 'CER' as two metrics.
  """
  # Get predictions
  predictions = estimator.predict(input_fn=input_fn_eval)

  # Get probabilities of each predicted class
  probs = [pred["probabilities"] for pred in predictions]

  num_of_examples = len(probs)
  targets = [entry[2] for entry in entries]  # The ground truth transcript

  total_wer, total_cer = 0, 0
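  # Greedy (best path) decoder that maps per-step probabilities back to text.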
  greedy_decoder = decoder.DeepSpeechDecoder(speech_labels)
  for i in range(num_of_examples):
    # Decode string.
    decoded_str = greedy_decoder.decode(probs[i])
    # Compute CER.
    total_cer += greedy_decoder.cer(decoded_str, targets[i]) / float(
        len(targets[i]))
    # Compute WER.
    total_wer += greedy_decoder.wer(decoded_str, targets[i]) / float(
        len(targets[i].split()))

  # Get mean value
  total_cer /= num_of_examples
  total_wer /= num_of_examples

  global_step = estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP)
  eval_results = {
      _WER_KEY: total_wer,
      _CER_KEY: total_cer,
      tf.GraphKeys.GLOBAL_STEP: global_step,
  }

  return eval_results


def model_fn(features, labels, mode, params):
  """Define model function for deep speech model.

  Args:
    features: a dictionary of input_data features. It includes the data
      input_length, label_length and the spectrogram features.
    labels: a list of labels for the input data.
    mode: current estimator mode; should be one of
      `tf.estimator.ModeKeys.TRAIN`, `EVAL`, or `PREDICT`.
    params: a dict of hyper parameters to be passed to model_fn.

  Returns:
150
151
    EstimatorSpec parameterized according to the input params and the
    current mode.
  """
  num_classes = params["num_classes"]
  input_length = features["input_length"]
  label_length = features["label_length"]
  features = features["features"]

  # Create DeepSpeech2 model.
  model = deep_speech_model.DeepSpeech2(
      flags_obj.rnn_hidden_layers, flags_obj.rnn_type,
      flags_obj.is_bidirectional, flags_obj.rnn_hidden_size,
      num_classes, flags_obj.use_bias)

  if mode == tf.estimator.ModeKeys.PREDICT:
    logits = model(features, training=False)
    predictions = {
        "classes": tf.argmax(logits, axis=2),
        "probabilities": tf.nn.softmax(logits),
        "logits": logits
    }
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions)

  # In training mode.
  logits = model(features, training=True)
  probs = tf.nn.softmax(logits)
  ctc_input_length = compute_length_after_conv(
      tf.shape(features)[1], tf.shape(probs)[1], input_length)
  # Compute CTC loss
  loss = tf.reduce_mean(ctc_loss(
      label_length, ctc_input_length, labels, probs))

  optimizer = tf.train.AdamOptimizer(learning_rate=flags_obj.learning_rate)
  global_step = tf.train.get_or_create_global_step()
  minimize_op = optimizer.minimize(loss, global_step=global_step)
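  # The UPDATE_OPS collection holds ops that must run alongside training, such
  # as the batch normalization moving-average updates.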
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  # Create the train_op that groups both minimize_ops and update_ops
  train_op = tf.group(minimize_op, update_ops)

  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op)


def generate_dataset(data_dir):
  """Generate a speech dataset."""
  audio_conf = dataset.AudioConfig(sample_rate=flags_obj.sample_rate,
                                   window_ms=flags_obj.window_ms,
                                   stride_ms=flags_obj.stride_ms,
                                   normalize=True)
  train_data_conf = dataset.DatasetConfig(
      audio_conf,
      data_dir,
      flags_obj.vocabulary_file,
      flags_obj.sortagrad
  )
  speech_dataset = dataset.DeepSpeechDataset(train_data_conf)
  return speech_dataset


def run_deep_speech(_):
  """Run deep speech training and eval loop."""
  tf.set_random_seed(flags_obj.seed)
  # Data preprocessing
  tf.logging.info("Data preprocessing...")
  train_speech_dataset = generate_dataset(flags_obj.train_data_dir)
  eval_speech_dataset = generate_dataset(flags_obj.eval_data_dir)

  # Number of label classes. Label string is "[a-z]' -"
  num_classes = len(train_speech_dataset.speech_labels)

  # Use distribution strategy for multi-gpu training
  num_gpus = flags_core.get_num_gpus(flags_obj)
  distribution_strategy = distribution_utils.get_distribution_strategy(
      num_gpus=num_gpus)
  run_config = tf.estimator.RunConfig(
      train_distribute=distribution_strategy)

  estimator = tf.estimator.Estimator(
      model_fn=model_fn,
      model_dir=flags_obj.model_dir,
      config=run_config,
      params={
          "num_classes": num_classes,
      }
  )

  # Benchmark logging
  run_params = {
      "batch_size": flags_obj.batch_size,
      "train_epochs": flags_obj.train_epochs,
      "rnn_hidden_size": flags_obj.rnn_hidden_size,
      "rnn_hidden_layers": flags_obj.rnn_hidden_layers,
      "rnn_type": flags_obj.rnn_type,
      "is_bidirectional": flags_obj.is_bidirectional,
      "use_bias": flags_obj.use_bias
  }

  dataset_name = "LibriSpeech"
  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info("deep_speech", dataset_name, run_params,
                                test_id=flags_obj.benchmark_test_id)

  train_hooks = hooks_helper.get_train_hooks(
      flags_obj.hooks,
      model_dir=flags_obj.model_dir,
      batch_size=flags_obj.batch_size)

  per_replica_batch_size = distribution_utils.per_replica_batch_size(
      flags_obj.batch_size, num_gpus)

  def input_fn_train():
    return dataset.input_fn(
        per_replica_batch_size, train_speech_dataset)

  def input_fn_eval():
    return dataset.input_fn(
        per_replica_batch_size, eval_speech_dataset)

  total_training_cycle = (flags_obj.train_epochs //
                          flags_obj.epochs_between_evals)
  for cycle_index in range(total_training_cycle):
    tf.logging.info("Starting a training cycle: %d/%d",
                    cycle_index + 1, total_training_cycle)

    # Perform batch_wise dataset shuffling
    train_speech_dataset.entries = dataset.batch_wise_dataset_shuffle(
        train_speech_dataset.entries, cycle_index, flags_obj.sortagrad,
        flags_obj.batch_size)

    estimator.train(input_fn=input_fn_train, hooks=train_hooks)

    # Evaluation
    tf.logging.info("Starting to evaluate...")

    eval_results = evaluate_model(
        estimator, eval_speech_dataset.speech_labels,
        eval_speech_dataset.entries, input_fn_eval)

    # Log the WER and CER results.
    benchmark_logger.log_evaluation_result(eval_results)
    tf.logging.info(
        "Iteration {}: WER = {:.2f}, CER = {:.2f}".format(
            cycle_index + 1, eval_results[_WER_KEY], eval_results[_CER_KEY]))

    # Stop training early if the evaluation WER has reached the stop threshold.
    if model_helpers.past_stop_threshold(
        flags_obj.wer_threshold, eval_results[_WER_KEY]):
      break


def define_deep_speech_flags():
  """Add flags for run_deep_speech."""
  # Add common flags
  flags_core.define_base(
      data_dir=False  # we use train_data_dir and eval_data_dir instead
  )
  flags_core.define_performance(
      num_parallel_calls=False,
      inter_op=False,
      intra_op=False,
      synthetic_data=False,
      max_train_steps=False,
      dtype=False
  )
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags_core.set_defaults(
      model_dir="/tmp/deep_speech_model/",
      export_dir="/tmp/deep_speech_saved_model/",
      train_epochs=10,
      batch_size=128,
      hooks="")

  # Deep speech flags
  flags.DEFINE_integer(
      name="seed", default=1,
      help=flags_core.help_wrap("The random seed."))

  flags.DEFINE_string(
      name="train_data_dir",
      default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv",
      help=flags_core.help_wrap("The csv file path of the training dataset."))

  flags.DEFINE_string(
      name="eval_data_dir",
      default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv",
      help=flags_core.help_wrap("The csv file path of the evaluation dataset."))

  flags.DEFINE_bool(
      name="sortagrad", default=True,
      help=flags_core.help_wrap(
          "If true, sort examples by audio length and perform no "
          "batch_wise shuffling for the first epoch."))

  flags.DEFINE_integer(
      name="sample_rate", default=16000,
      help=flags_core.help_wrap("The sample rate for audio."))

  flags.DEFINE_integer(
      name="window_ms", default=20,
      help=flags_core.help_wrap("The frame length for spectrogram."))

  flags.DEFINE_integer(
      name="stride_ms", default=10,
      help=flags_core.help_wrap("The frame step."))

  flags.DEFINE_string(
      name="vocabulary_file", default=_VOCABULARY_FILE,
      help=flags_core.help_wrap("The file path of vocabulary file."))

  # RNN related flags
  flags.DEFINE_integer(
      name="rnn_hidden_size", default=800,
      help=flags_core.help_wrap("The hidden size of RNNs."))

  flags.DEFINE_integer(
      name="rnn_hidden_layers", default=5,
      help=flags_core.help_wrap("The number of RNN layers."))

  flags.DEFINE_bool(
      name="use_bias", default=True,
      help=flags_core.help_wrap("Use bias in the last fully-connected layer"))

  flags.DEFINE_bool(
      name="is_bidirectional", default=True,
      help=flags_core.help_wrap("Whether the RNN unit is bidirectional."))

  flags.DEFINE_enum(
      name="rnn_type", default="gru",
      enum_values=deep_speech_model.SUPPORTED_RNNS.keys(),
      case_sensitive=False,
      help=flags_core.help_wrap("Type of RNN cell."))

  # Training related flags
  flags.DEFINE_float(
      name="learning_rate", default=5e-4,
      help=flags_core.help_wrap("The initial learning rate."))

  # Evaluation metrics threshold
  flags.DEFINE_float(
      name="wer_threshold", default=None,
      help=flags_core.help_wrap(
          "If passed, training will stop when the evaluation metric WER is "
          "greater than or equal to wer_threshold. For the LibriSpeech "
          "dataset, the desired wer_threshold is 0.23, which is the result "
          "achieved by the MLPerf reference implementation."))

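# Example invocation (illustrative; the csv paths below are this script's
# defaults and should point at preprocessed LibriSpeech csv files):
#
#   python deep_speech.py \
#     --train_data_dir=/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv \
#     --eval_data_dir=/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv \
#     --model_dir=/tmp/deep_speech_model/ \
#     --wer_threshold=0.23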

def main(_):
  with logger.benchmark_context(flags_obj):
    run_deep_speech(flags_obj)


if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  define_deep_speech_flags()
  flags_obj = flags.FLAGS
  absl_app.run(main)