# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main entry to train and evaluate DeepSpeech model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
# pylint: disable=g-bad-import-order
from absl import app as absl_app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: enable=g-bad-import-order

import data.dataset as dataset
import decoder
import deep_speech_model
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers

# Default vocabulary file
_VOCABULARY_FILE = os.path.join(
    os.path.dirname(__file__), "data/vocabulary.txt")
# Evaluation metrics
_WER_KEY = "WER"
_CER_KEY = "CER"


def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length):
  """Computes the time_steps/ctc_input_length after convolution.

  Suppose that the original feature contains two parts:
  1) Real spectrogram signals, spanning input_length steps.
  2) Padded part with all 0s.
  The total length of those two parts is denoted as max_time_steps, which is
  the padded length of the current batch. The convolution layers reduce the
  number of time steps of a spectrogram feature. Since we know what fraction
  of the padded length the real signal occupies, the number of valid time
  steps after convolution (denoted ctc_input_length) is computed as:
  ctc_input_length = (input_length / max_time_steps) * ctc_time_steps.
  This length is then fed into the CTC loss function to compute the loss.
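  For example (illustrative numbers): if max_time_steps=300, ctc_time_steps=150
  and input_length=200, then ctc_input_length = floor(200 * 150 / 300) = 100.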

  Args:
    max_time_steps: max_time_steps for the batch, after padding.
    ctc_time_steps: number of timesteps after convolution.
    input_length: actual length of the original spectrogram, without padding.

  Returns:
    the ctc_input_length after the convolution layer.
  """
  ctc_input_length = tf.cast(
      tf.multiply(input_length, ctc_time_steps), dtype=tf.float32)
  return tf.cast(
      tf.math.floordiv(ctc_input_length,
                       tf.cast(max_time_steps, dtype=tf.float32)),
      dtype=tf.int32)


def evaluate_model(estimator, speech_labels, entries, input_fn_eval):
  """Evaluate the model performance using WER anc CER as metrics.

  WER: Word Error Rate
  CER: Character Error Rate

  Args:
    estimator: estimator to evaluate.
    speech_labels: a string specifying all the character in the vocabulary.
80
81
    entries: a list of data entries (audio_file, file_size, transcript) for the
      given dataset.
82
83
84
85
86
87
    input_fn_eval: data input function for evaluation.

  Returns:
    Evaluation result containing 'wer' and 'cer' as two metrics.
  """
  # Get predictions
  predictions = estimator.predict(input_fn=input_fn_eval)

  # Get probabilities of each predicted class
  probs = [pred["probabilities"] for pred in predictions]

  num_of_examples = len(probs)
  targets = [entry[2] for entry in entries]  # The ground truth transcript

  total_wer, total_cer = 0, 0
  greedy_decoder = decoder.DeepSpeechDecoder(speech_labels)
  for i in range(num_of_examples):
    # Decode string.
    decoded_str = greedy_decoder.decode(probs[i])
    # Compute CER.
    total_cer += greedy_decoder.cer(decoded_str, targets[i]) / float(
        len(targets[i]))
    # Compute WER.
    total_wer += greedy_decoder.wer(decoded_str, targets[i]) / float(
        len(targets[i].split()))

  # Get mean value
  total_cer /= num_of_examples
  total_wer /= num_of_examples

  global_step = estimator.get_variable_value(
      tf.compat.v1.GraphKeys.GLOBAL_STEP)
  eval_results = {
      _WER_KEY: total_wer,
      _CER_KEY: total_cer,
      tf.compat.v1.GraphKeys.GLOBAL_STEP: global_step,
  }

  return eval_results


def model_fn(features, labels, mode, params):
  """Define model function for deep speech model.
124
125

  Args:
126
127
128
129
130
131
    features: a dictionary of input_data features. It includes the data
      input_length, label_length and the spectrogram features.
    labels: a list of labels for the input data.
    mode: current estimator mode; should be one of
      `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`.
    params: a dict of hyper parameters to be passed to model_fn.
132
133

  Returns:
134
135
    EstimatorSpec parameterized according to the input params and the
    current mode.
136
  """
  num_classes = params["num_classes"]
  input_length = features["input_length"]
  label_length = features["label_length"]
  features = features["features"]

  # Create DeepSpeech2 model.
  model = deep_speech_model.DeepSpeech2(
      flags_obj.rnn_hidden_layers, flags_obj.rnn_type,
      flags_obj.is_bidirectional, flags_obj.rnn_hidden_size,
      num_classes, flags_obj.use_bias)

  if mode == tf.estimator.ModeKeys.PREDICT:
    logits = model(features, training=False)
    predictions = {
        "classes": tf.argmax(logits, axis=2),
        "probabilities": logits,
        "logits": logits
    }
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions)

  # In training mode.
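  # Note: there is no separate EVAL branch in this model_fn; WER/CER
  # evaluation is performed in evaluate_model() using the PREDICT outputs.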
  logits = model(features, training=True)
  ctc_input_length = compute_length_after_conv(
      tf.shape(features)[1], tf.shape(logits)[1], input_length)
  # Compute CTC loss
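  # tf.keras.backend.ctc_batch_cost expects dense, zero-padded labels of
  # shape (batch_size, max_label_length), plus the per-example logit lengths
  # (ctc_input_length) and label lengths.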
  loss = tf.reduce_mean(tf.keras.backend.ctc_batch_cost(
      labels, logits, ctc_input_length, label_length))

  optimizer = tf.compat.v1.train.AdamOptimizer(
      learning_rate=flags_obj.learning_rate)
  global_step = tf.compat.v1.train.get_or_create_global_step()
  minimize_op = optimizer.minimize(loss, global_step=global_step)
  update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
  # Create the train_op that groups both minimize_ops and update_ops
  train_op = tf.group(minimize_op, update_ops)

  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op)


def generate_dataset(data_dir):
  """Generate a speech dataset."""
  audio_conf = dataset.AudioConfig(sample_rate=flags_obj.sample_rate,
                                   window_ms=flags_obj.window_ms,
                                   stride_ms=flags_obj.stride_ms,
                                   normalize=True)
  train_data_conf = dataset.DatasetConfig(
      audio_conf,
      data_dir,
      flags_obj.vocabulary_file,
      flags_obj.sortagrad
  )
  speech_dataset = dataset.DeepSpeechDataset(train_data_conf)
  return speech_dataset

def per_device_batch_size(batch_size, num_gpus):
  """For multi-gpu, batch-size must be a multiple of the number of GPUs.


  Note that distribution strategy handles this automatically when used with
  Keras. For using with Estimator, we need to get per GPU batch.

  Args:
    batch_size: Global batch size to be divided among devices. This should be
      equal to num_gpus times the single-GPU batch_size for multi-gpu training.
    num_gpus: How many GPUs are used with DistributionStrategies.

  Returns:
    Batch size per device.

  Raises:
    ValueError: if batch_size is not divisible by number of devices
  """
  if num_gpus <= 1:
    return batch_size

  remainder = batch_size % num_gpus
  if remainder:
    err = ('When running with multiple GPUs, batch size '
           'must be a multiple of the number of available GPUs. Found {} '
           'GPUs with a batch size of {}; try --batch_size={} instead.'
          ).format(num_gpus, batch_size, batch_size - remainder)
    raise ValueError(err)
  return int(batch_size / num_gpus)

def run_deep_speech(_):
  """Run deep speech training and eval loop."""
  tf.compat.v1.set_random_seed(flags_obj.seed)
  # Data preprocessing
  logging.info("Data preprocessing...")
  train_speech_dataset = generate_dataset(flags_obj.train_data_dir)
  eval_speech_dataset = generate_dataset(flags_obj.eval_data_dir)

  # Number of label classes. Label string is "[a-z]' -"
  num_classes = len(train_speech_dataset.speech_labels)

  # Use distribution strategy for multi-gpu training
  num_gpus = flags_core.get_num_gpus(flags_obj)
  distribution_strategy = distribution_utils.get_distribution_strategy(
      num_gpus=num_gpus)
  run_config = tf.estimator.RunConfig(
      train_distribute=distribution_strategy)

  estimator = tf.estimator.Estimator(
      model_fn=model_fn,
      model_dir=flags_obj.model_dir,
      config=run_config,
      params={
          "num_classes": num_classes,
      }
  )

  # Benchmark logging
  run_params = {
      "batch_size": flags_obj.batch_size,
      "train_epochs": flags_obj.train_epochs,
      "rnn_hidden_size": flags_obj.rnn_hidden_size,
      "rnn_hidden_layers": flags_obj.rnn_hidden_layers,
      "rnn_type": flags_obj.rnn_type,
      "is_bidirectional": flags_obj.is_bidirectional,
      "use_bias": flags_obj.use_bias
  }

  # Set up the benchmark logger used by log_evaluation_result() below.
  dataset_name = "LibriSpeech"
  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info("deep_speech", dataset_name, run_params,
                                test_id=flags_obj.benchmark_test_id)

  per_replica_batch_size = per_device_batch_size(flags_obj.batch_size, num_gpus)

  def input_fn_train():
    return dataset.input_fn(
        per_replica_batch_size, train_speech_dataset)

  def input_fn_eval():
    return dataset.input_fn(
        per_replica_batch_size, eval_speech_dataset)

  total_training_cycle = (flags_obj.train_epochs //
                          flags_obj.epochs_between_evals)
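  # For example, with train_epochs=10 and epochs_between_evals=2, five
  # train-then-evaluate cycles are run.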
  for cycle_index in range(total_training_cycle):
    logging.info("Starting a training cycle: %d/%d",
276
277
                    cycle_index + 1, total_training_cycle)

    # Perform batch_wise dataset shuffling
    train_speech_dataset.entries = dataset.batch_wise_dataset_shuffle(
        train_speech_dataset.entries, cycle_index, flags_obj.sortagrad,
        flags_obj.batch_size)

    estimator.train(input_fn=input_fn_train)

    # Evaluation
    logging.info("Starting to evaluate...")

    eval_results = evaluate_model(
        estimator, eval_speech_dataset.speech_labels,
        eval_speech_dataset.entries, input_fn_eval)

    # Log the WER and CER results.
    benchmark_logger.log_evaluation_result(eval_results)
    logging.info(
        "Iteration {}: WER = {:.2f}, CER = {:.2f}".format(
            cycle_index + 1, eval_results[_WER_KEY], eval_results[_CER_KEY]))

    # Stop training early if the WER threshold is met.
    if model_helpers.past_stop_threshold(
        flags_obj.wer_threshold, eval_results[_WER_KEY]):
      break


def define_deep_speech_flags():
  """Add flags for run_deep_speech."""
  # Add common flags
  flags_core.define_base(
      data_dir=False,  # we use train_data_dir and eval_data_dir instead
      export_dir=True,
      train_epochs=True,
      hooks=True,
      num_gpu=True,
      epochs_between_evals=True
  )
  flags_core.define_performance(
      num_parallel_calls=False,
      inter_op=False,
      intra_op=False,
      synthetic_data=False,
      max_train_steps=False,
      dtype=False
  )
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags_core.set_defaults(
      model_dir="/tmp/deep_speech_model/",
      export_dir="/tmp/deep_speech_saved_model/",
      train_epochs=10,
      batch_size=128,
      hooks="")

  # Deep speech flags
  flags.DEFINE_integer(
      name="seed", default=1,
      help=flags_core.help_wrap("The random seed."))

  flags.DEFINE_string(
      name="train_data_dir",
      default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv",
      help=flags_core.help_wrap("The csv file path of the training dataset."))

  flags.DEFINE_string(
      name="eval_data_dir",
      default="/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv",
      help=flags_core.help_wrap(
          "The csv file path of the evaluation dataset."))

  flags.DEFINE_bool(
      name="sortagrad", default=True,
      help=flags_core.help_wrap(
          "If true, sort examples by audio length and perform no "
          "batch_wise shuffling for the first epoch."))

  flags.DEFINE_integer(
      name="sample_rate", default=16000,
      help=flags_core.help_wrap("The sample rate for audio."))

  flags.DEFINE_integer(
      name="window_ms", default=20,
      help=flags_core.help_wrap("The frame length in ms for the spectrogram."))

  flags.DEFINE_integer(
      name="stride_ms", default=10,
      help=flags_core.help_wrap("The frame step in ms."))

  flags.DEFINE_string(
      name="vocabulary_file", default=_VOCABULARY_FILE,
      help=flags_core.help_wrap("The file path of vocabulary file."))

  # RNN related flags
  flags.DEFINE_integer(
      name="rnn_hidden_size", default=800,
      help=flags_core.help_wrap("The hidden size of RNNs."))

  flags.DEFINE_integer(
      name="rnn_hidden_layers", default=5,
      help=flags_core.help_wrap("The number of RNN layers."))

  flags.DEFINE_bool(
      name="use_bias", default=True,
      help=flags_core.help_wrap("Use bias in the last fully-connected layer"))

  flags.DEFINE_bool(
      name="is_bidirectional", default=True,
      help=flags_core.help_wrap("If rnn unit is bidirectional"))

  flags.DEFINE_enum(
      name="rnn_type", default="gru",
      enum_values=deep_speech_model.SUPPORTED_RNNS.keys(),
      case_sensitive=False,
      help=flags_core.help_wrap("Type of RNN cell."))

  # Training related flags
  flags.DEFINE_float(
      name="learning_rate", default=5e-4,
      help=flags_core.help_wrap("The initial learning rate."))

  # Evaluation metrics threshold
  flags.DEFINE_float(
      name="wer_threshold", default=None,
      help=flags_core.help_wrap(
          "If passed, training will stop when the evaluation metric WER is "
          "greater than or equal to wer_threshold. For libri speech dataset "
          "the desired wer_threshold is 0.23 which is the result achieved by "
          "MLPerf implementation."))


def main(_):
  run_deep_speech(flags_obj)


if __name__ == "__main__":
  logging.set_verbosity(logging.INFO)
  define_deep_speech_flags()
  flags_obj = flags.FLAGS
  absl_app.run(main)