# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main entry to train and evaluate DeepSpeech model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
# pylint: disable=g-bad-import-order
from absl import app as absl_app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order

import data.dataset as dataset
import decoder
import deep_speech_model
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers

# Default vocabulary file
_VOCABULARY_FILE = os.path.join(
    os.path.dirname(__file__), "data/vocabulary.txt")
# Evaluation metrics
_WER_KEY = "WER"
_CER_KEY = "CER"


def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length):
  """Computes the time_steps/ctc_input_length after convolution.

  Suppose that the original feature contains two parts:
  1) Real spectrogram signals, spanning input_length steps.
  2) Padded part with all 0s.
  The total length of those two parts is denoted as max_time_steps, which is
  the padded length of the current batch. After convolution layers, the time
  steps of a spectrogram feature will be decreased. As we know the ratio of
  the signal's original length to the padded length, we can compute the time
  steps of the signal after convolution (denoted ctc_input_length) as:
  ctc_input_length = (input_length / max_time_steps) * output_length_of_conv.
  This length is then fed into the CTC loss function to compute loss.

  Args:
    max_time_steps: max_time_steps for the batch, after padding.
    ctc_time_steps: number of timesteps after convolution.
    input_length: actual length of the original spectrogram, without padding.

  Returns:
    the ctc_input_length after convolution layer.
  """
  ctc_input_length = tf.cast(tf.multiply(
      input_length, ctc_time_steps), dtype=tf.float32)
  return tf.cast(tf.math.floordiv(
      ctc_input_length,
      tf.cast(max_time_steps, dtype=tf.float32)), dtype=tf.int32)


def evaluate_model(estimator, speech_labels, entries, input_fn_eval):
  """Evaluate the model performance using WER anc CER as metrics.

  WER: Word Error Rate
  CER: Character Error Rate

  Args:
    estimator: estimator to evaluate.
    speech_labels: a string specifying all the characters in the vocabulary.
    entries: a list of data entries (audio_file, file_size, transcript) for the
      given dataset.
    input_fn_eval: data input function for evaluation.

  Returns:
    Evaluation result containing 'wer' and 'cer' as two metrics.
  """
  # Get predictions
  predictions = estimator.predict(input_fn=input_fn_eval)

  # Get probabilities of each predicted class
  probs = [pred["probabilities"] for pred in predictions]

  num_of_examples = len(probs)
  targets = [entry[2] for entry in entries]  # The ground truth transcript

  total_wer, total_cer = 0, 0
  greedy_decoder = decoder.DeepSpeechDecoder(speech_labels)
  for i in range(num_of_examples):
    # Decode string.
    decoded_str = greedy_decoder.decode(probs[i])
    # Compute CER.
    total_cer += greedy_decoder.cer(decoded_str, targets[i]) / float(
        len(targets[i]))
    # Compute WER.
    total_wer += greedy_decoder.wer(decoded_str, targets[i]) / float(
        len(targets[i].split()))
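    # Illustrative example (hypothetical transcript): for target "hello world"
    # and decoded "hello word", wer() is assumed to return the raw word edit
    # distance (1), which the division above normalizes to 0.5 for this entry.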

  # Get mean value
  total_cer /= num_of_examples
  total_wer /= num_of_examples

  global_step = estimator.get_variable_value(
      tf.compat.v1.GraphKeys.GLOBAL_STEP)
  eval_results = {
      _WER_KEY: total_wer,
      _CER_KEY: total_cer,
      tf.compat.v1.GraphKeys.GLOBAL_STEP: global_step,
  }

  return eval_results


def model_fn(features, labels, mode, params):
  """Define model function for deep speech model.

  Args:
    features: a dictionary of input_data features. It includes the data
      input_length, label_length and the spectrogram features.
    labels: a list of labels for the input data.
    mode: current estimator mode; should be one of
      `tf.estimator.ModeKeys.TRAIN`, `EVAL`, or `PREDICT`.
    params: a dict of hyper parameters to be passed to model_fn.

  Returns:
    EstimatorSpec parameterized according to the input params and the
    current mode.
  """
  num_classes = params["num_classes"]
  input_length = features["input_length"]
  label_length = features["label_length"]
  features = features["features"]

  # Create DeepSpeech2 model.
  model = deep_speech_model.DeepSpeech2(
      flags_obj.rnn_hidden_layers, flags_obj.rnn_type,
      flags_obj.is_bidirectional, flags_obj.rnn_hidden_size,
      num_classes, flags_obj.use_bias)

  if mode == tf.estimator.ModeKeys.PREDICT:
    logits = model(features, training=False)
    predictions = {
        "classes": tf.argmax(logits, axis=2),
        "probabilities": logits,
        "logits": logits
    }
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions)

  # In training mode.
  logits = model(features, training=True)
  ctc_input_length = compute_length_after_conv(
      tf.shape(features)[1], tf.shape(logits)[1], input_length)
  # Compute CTC loss
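  # A note on the shapes tf.keras.backend.ctc_batch_cost expects: labels is
  # [batch_size, max_label_length]; logits holds per-frame softmax
  # probabilities of shape [batch_size, time_steps, num_classes] (hence the
  # "probabilities" key above); ctc_input_length and label_length are
  # [batch_size, 1] tensors of valid lengths.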
  loss = tf.reduce_mean(tf.keras.backend.ctc_batch_cost(
      labels, logits, ctc_input_length, label_length))

  optimizer = tf.compat.v1.train.AdamOptimizer(
      learning_rate=flags_obj.learning_rate)
  global_step = tf.compat.v1.train.get_or_create_global_step()
  minimize_op = optimizer.minimize(loss, global_step=global_step)
  update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
  # Create the train_op that groups both minimize_op and update_ops.
  train_op = tf.group(minimize_op, update_ops)

  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op)


def generate_dataset(data_dir):
  """Generate a speech dataset."""
  audio_conf = dataset.AudioConfig(sample_rate=flags_obj.sample_rate,
                                   window_ms=flags_obj.window_ms,
                                   stride_ms=flags_obj.stride_ms,
                                   normalize=True)
  train_data_conf = dataset.DatasetConfig(
      audio_conf,
      data_dir,
      flags_obj.vocabulary_file,
      flags_obj.sortagrad
  )
  speech_dataset = dataset.DeepSpeechDataset(train_data_conf)
  return speech_dataset

def per_device_batch_size(batch_size, num_gpus):
  """For multi-gpu, batch-size must be a multiple of the number of GPUs.


  Note that distribution strategy handles this automatically when used with
  Keras. For using with Estimator, we need to get per GPU batch.

  Args:
    batch_size: Global batch size to be divided among devices. This should be
      equal to num_gpus times the single-GPU batch_size for multi-gpu training.
    num_gpus: How many GPUs are used with DistributionStrategies.

  Returns:
    Batch size per device.

  Raises:
    ValueError: if batch_size is not divisible by number of devices
  """
  if num_gpus <= 1:
    return batch_size

  remainder = batch_size % num_gpus
  if remainder:
    err = ('When running with multiple GPUs, batch size '
           'must be a multiple of the number of available GPUs. Found {} '
           'GPUs with a batch size of {}; try --batch_size={} instead.'
          ).format(num_gpus, batch_size, batch_size - remainder)
    raise ValueError(err)
  return int(batch_size / num_gpus)
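
# Example usage (hypothetical values): per_device_batch_size(128, 4) returns
# 32, while per_device_batch_size(100, 3) raises a ValueError suggesting
# --batch_size=99.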

def run_deep_speech(_):
  """Run deep speech training and eval loop."""
  tf.compat.v1.set_random_seed(flags_obj.seed)
  # Data preprocessing
  tf.compat.v1.logging.info("Data preprocessing...")
  train_speech_dataset = generate_dataset(flags_obj.train_data_dir)
  eval_speech_dataset = generate_dataset(flags_obj.eval_data_dir)

  # Number of label classes. Label string is "[a-z]' -"
  num_classes = len(train_speech_dataset.speech_labels)

  # Use distribution strategy for multi-gpu training
  num_gpus = flags_core.get_num_gpus(flags_obj)
  distribution_strategy = distribution_utils.get_distribution_strategy(
      num_gpus=num_gpus)
  run_config = tf.estimator.RunConfig(
      train_distribute=distribution_strategy)

  estimator = tf.estimator.Estimator(
      model_fn=model_fn,
      model_dir=flags_obj.model_dir,
      config=run_config,
      params={
          "num_classes": num_classes,
      }
  )

  # Benchmark logging
  run_params = {
      "batch_size": flags_obj.batch_size,
      "train_epochs": flags_obj.train_epochs,
      "rnn_hidden_size": flags_obj.rnn_hidden_size,
      "rnn_hidden_layers": flags_obj.rnn_hidden_layers,
      "rnn_type": flags_obj.rnn_type,
      "is_bidirectional": flags_obj.is_bidirectional,
      "use_bias": flags_obj.use_bias
  }

  # Create the benchmark logger used below to log evaluation results.
  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info("deep_speech", "LibriSpeech", run_params,
                                test_id=flags_obj.benchmark_test_id)

  per_replica_batch_size = per_device_batch_size(flags_obj.batch_size, num_gpus)

  def input_fn_train():
    return dataset.input_fn(
        per_replica_batch_size, train_speech_dataset)

  def input_fn_eval():
    return dataset.input_fn(
        per_replica_batch_size, eval_speech_dataset)

  total_training_cycle = (flags_obj.train_epochs //
                          flags_obj.epochs_between_evals)
  for cycle_index in range(total_training_cycle):
    tf.compat.v1.logging.info("Starting a training cycle: %d/%d",
275
276
                    cycle_index + 1, total_training_cycle)

    # Perform batch_wise dataset shuffling
    train_speech_dataset.entries = dataset.batch_wise_dataset_shuffle(
        train_speech_dataset.entries, cycle_index, flags_obj.sortagrad,
        flags_obj.batch_size)
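    # Note: with --sortagrad=True, batch_wise_dataset_shuffle is expected to
    # keep the first cycle in length-sorted order and only start batch-wise
    # shuffling from the second cycle onward.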

    estimator.train(input_fn=input_fn_train)

    # Evaluation
    tf.compat.v1.logging.info("Starting to evaluate...")

    eval_results = evaluate_model(
        estimator, eval_speech_dataset.speech_labels,
        eval_speech_dataset.entries, input_fn_eval)

    # Log the WER and CER results.
    benchmark_logger.log_evaluation_result(eval_results)
    tf.compat.v1.logging.info(
        "Iteration {}: WER = {:.2f}, CER = {:.2f}".format(
            cycle_index + 1, eval_results[_WER_KEY], eval_results[_CER_KEY]))

    # Stop the training loop if the evaluation WER threshold is met.
    if model_helpers.past_stop_threshold(
        flags_obj.wer_threshold, eval_results[_WER_KEY]):
      break


def define_deep_speech_flags():
  """Add flags for run_deep_speech."""
  # Add common flags
  flags_core.define_base(
      data_dir=False,  # we use train_data_dir and eval_data_dir instead
      export_dir=True,
      train_epochs=True,
      hooks=True,
      num_gpu=True,
      epochs_between_evals=True
  )
  flags_core.define_performance(
      num_parallel_calls=False,
      inter_op=False,
      intra_op=False,
      synthetic_data=False,
      max_train_steps=False,
      dtype=False
  )
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags_core.set_defaults(
      model_dir="/tmp/deep_speech_model/",
      export_dir="/tmp/deep_speech_saved_model/",
      train_epochs=10,
      batch_size=128,
      hooks="")

  # Deep speech flags
  flags.DEFINE_integer(
      name="seed", default=1,
      help=flags_core.help_wrap("The random seed."))

  flags.DEFINE_string(
      name="train_data_dir",
      default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv",
      help=flags_core.help_wrap("The csv file path of train dataset."))

  flags.DEFINE_string(
      name="eval_data_dir",
      default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv",
      help=flags_core.help_wrap("The csv file path of evaluation dataset."))

  flags.DEFINE_bool(
      name="sortagrad", default=True,
      help=flags_core.help_wrap(
          "If true, sort examples by audio length and perform no "
          "batch_wise shuffling for the first epoch."))

  flags.DEFINE_integer(
      name="sample_rate", default=16000,
      help=flags_core.help_wrap("The sample rate for audio."))

  flags.DEFINE_integer(
      name="window_ms", default=20,
      help=flags_core.help_wrap("The frame length for spectrogram."))

  flags.DEFINE_integer(
      name="stride_ms", default=10,
      help=flags_core.help_wrap("The frame step."))

  flags.DEFINE_string(
      name="vocabulary_file", default=_VOCABULARY_FILE,
      help=flags_core.help_wrap("The file path of vocabulary file."))

  # RNN related flags
  flags.DEFINE_integer(
      name="rnn_hidden_size", default=800,
      help=flags_core.help_wrap("The hidden size of RNNs."))

  flags.DEFINE_integer(
      name="rnn_hidden_layers", default=5,
      help=flags_core.help_wrap("The number of RNN layers."))

  flags.DEFINE_bool(
      name="use_bias", default=True,
      help=flags_core.help_wrap("Use bias in the last fully-connected layer"))

  flags.DEFINE_bool(
      name="is_bidirectional", default=True,
      help=flags_core.help_wrap("If rnn unit is bidirectional"))

  flags.DEFINE_enum(
      name="rnn_type", default="gru",
      enum_values=deep_speech_model.SUPPORTED_RNNS.keys(),
      case_sensitive=False,
      help=flags_core.help_wrap("Type of RNN cell."))

  # Training related flags
  flags.DEFINE_float(
      name="learning_rate", default=5e-4,
      help=flags_core.help_wrap("The initial learning rate."))

  # Evaluation metrics threshold
  flags.DEFINE_float(
      name="wer_threshold", default=None,
      help=flags_core.help_wrap(
          "If passed, training will stop when the evaluation metric WER is "
          "greater than or equal to wer_threshold. For libri speech dataset "
          "the desired wer_threshold is 0.23 which is the result achieved by "
          "MLPerf implementation."))


def main(_):
  run_deep_speech(flags_obj)


if __name__ == "__main__":
  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
  define_deep_speech_flags()
  flags_obj = flags.FLAGS
  absl_app.run(main)