# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Network structure for DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

# Supported rnn cells.
SUPPORTED_RNNS = {
    "lstm": tf.nn.rnn_cell.BasicLSTMCell,
    # BasicRNNCell is the concrete vanilla RNN cell; the abstract RNNCell
    # base class cannot be instantiated with num_units.
    "rnn": tf.nn.rnn_cell.BasicRNNCell,
    "gru": tf.nn.rnn_cell.GRUCell,
}

# Parameters for batch normalization.
_BATCH_NORM_EPSILON = 1e-5
_BATCH_NORM_DECAY = 0.997

# Number of filters in each convolutional layer.
_CONV_FILTERS = 32


def batch_norm(inputs, training):
  """Batch normalization layer.

  Note that the momentum to use will affect validation accuracy over time.
  Batch norm has different behaviors during training/evaluation. With a large
  momentum, the model takes longer to get a near-accurate estimation of the
  moving mean/variance over the entire training dataset, which means we need
  more iterations to see good evaluation results. If the training data is evenly
  distributed over the feature space, we can also try setting a smaller momentum
  (such as 0.1) to get good evaluation result sooner.

  Args:
    inputs: input data for batch norm layer.
    training: a boolean to indicate if it is in training stage.

  Returns:
    tensor output from batch norm layer.
  """
  return tf.layers.batch_normalization(
      inputs=inputs, momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON,
      fused=True, training=training)
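

# A minimal training-op sketch (an illustrative addition, not part of the
# original model): tf.layers.batch_normalization creates moving-average update
# ops that TF 1.x does not run automatically, so the train op is typically
# wrapped in a control dependency on the UPDATE_OPS collection. The optimizer
# choice below is an assumption for the example.
def _example_train_op(loss, learning_rate=1e-4):
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  with tf.control_dependencies(update_ops):
    return tf.train.AdamOptimizer(learning_rate).minimize(loss)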


def _conv_bn_layer(inputs, padding, filters, kernel_size, strides, layer_id,
                   training):
  """Defines a 2D convolutional + batch normalization layer.

  Args:
    inputs: input data for the convolution layer.
    padding: padding to be applied before the convolution layer.
    filters: an integer, the number of output filters in the convolution.
    kernel_size: a tuple specifying the height and width of the 2D convolution
      window.
    strides: a tuple specifying the stride length of the convolution.
    layer_id: an integer specifying the layer index.
    training: a boolean to indicate which stage we are in (training/eval).

  Returns:
    tensor output from the current layer.
  """
  # Perform symmetric padding on the time and feature dimensions. This step
  # is required to avoid issues when the RNN output sequence is shorter than
  # the label length.
  inputs = tf.pad(
      inputs,
      [[0, 0], [padding[0], padding[0]], [padding[1], padding[1]], [0, 0]])
  inputs = tf.layers.conv2d(
      inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
      padding="valid", use_bias=False, activation=tf.nn.relu6,
      name="cnn_{}".format(layer_id))
  return batch_norm(inputs, training)
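

# An illustrative helper (an addition, not in the original file) showing the
# output-length arithmetic of _conv_bn_layer: after tf.pad with p elements on
# each side, a "valid" convolution with kernel size k and stride s maps an
# input of length t to (t + 2 * p - k) // s + 1 steps. With the layer-1
# settings on the time axis (p=20, k=41, s=2), this roughly halves t.
def _example_conv_output_length(t, p=20, k=41, s=2):
  return (t + 2 * p - k) // s + 1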


def _rnn_layer(inputs, rnn_cell, rnn_hidden_size, layer_id, is_batch_norm,
               is_bidirectional, training):
  """Defines a batch normalization + rnn layer.

  Args:
    inputs: input tensors for the current layer.
    rnn_cell: RNN cell instance to use.
    rnn_hidden_size: an integer for the dimensionality of the rnn output space.
    layer_id: an integer for the index of the current layer.
    is_batch_norm: a boolean specifying whether to perform batch normalization
      on the input states.
    is_bidirectional: a boolean specifying whether the rnn layer is
      bi-directional.
    training: a boolean to indicate which stage we are in (training/eval).

  Returns:
    tensor output for the current layer.
  """
  if is_batch_norm:
    inputs = batch_norm(inputs, training)

  # Construct forward/backward RNN cells.
  fw_cell = rnn_cell(num_units=rnn_hidden_size,
                     name="rnn_fw_{}".format(layer_id))
  bw_cell = rnn_cell(num_units=rnn_hidden_size,
                     name="rnn_bw_{}".format(layer_id))

  if is_bidirectional:
    outputs, _ = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=fw_cell, cell_bw=bw_cell, inputs=inputs, dtype=tf.float32,
        swap_memory=True)
    rnn_outputs = tf.concat(outputs, -1)
  else:
    # tf.nn.dynamic_rnn returns (outputs, state); keep only the outputs.
    rnn_outputs, _ = tf.nn.dynamic_rnn(
        fw_cell, inputs, dtype=tf.float32, swap_memory=True)

  return rnn_outputs
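

# Note (an added remark): with is_bidirectional=True, the forward and backward
# outputs are concatenated on the last axis, so each bidirectional layer emits
# 2 * rnn_hidden_size features, while the unidirectional path keeps
# rnn_hidden_size.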


class DeepSpeech2(object):
  """Define DeepSpeech2 model."""

  def __init__(self, num_rnn_layers, rnn_type, is_bidirectional,
               rnn_hidden_size, num_classes, use_bias):
    """Initialize DeepSpeech2 model.

    Args:
      num_rnn_layers: an integer, the number of rnn layers. By default, it's 5.
      rnn_type: a string, one of the supported rnn cells: gru, rnn and lstm.
      is_bidirectional: a boolean to indicate if the rnn layer is bidirectional.
      rnn_hidden_size: an integer for the number of hidden states in each unit.
      num_classes: an integer, the number of output classes/labels.
      use_bias: a boolean specifying whether to use bias in the last fc layer.
    """
    self.num_rnn_layers = num_rnn_layers
    self.rnn_type = rnn_type
    self.is_bidirectional = is_bidirectional
    self.rnn_hidden_size = rnn_hidden_size
    self.num_classes = num_classes
    self.use_bias = use_bias

  def __call__(self, inputs, training):
    # Two cnn layers.
    inputs = _conv_bn_layer(
        inputs, padding=(20, 5), filters=_CONV_FILTERS, kernel_size=(41, 11),
        strides=(2, 2), layer_id=1, training=training)

    inputs = _conv_bn_layer(
        inputs, padding=(10, 5), filters=_CONV_FILTERS, kernel_size=(21, 11),
        strides=(2, 1), layer_id=2, training=training)

    # The output of conv_layer2 has the shape of
    # [batch_size (N), times (T), features (F), channels (C)].
    # Convert the conv output to rnn input.
    batch_size = tf.shape(inputs)[0]
    feat_size = inputs.get_shape().as_list()[2]
    inputs = tf.reshape(
        inputs,
        [batch_size, -1, feat_size * _CONV_FILTERS])
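    # Shape note (an added remark): the two conv layers use time strides of 2,
    # so the reshaped rnn input is roughly [batch_size, T / 4, feat_size * 32],
    # matching the [batch, time, depth] layout expected by dynamic_rnn.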

    # RNN layers.
    rnn_cell = SUPPORTED_RNNS[self.rnn_type]
    for layer_counter in xrange(self.num_rnn_layers):
      # No batch normalization on the first layer.
      is_batch_norm = (layer_counter != 0)
      inputs = _rnn_layer(
          inputs, rnn_cell, self.rnn_hidden_size, layer_counter + 1,
          is_batch_norm, self.is_bidirectional, training)

    # FC layer with batch norm.
    inputs = batch_norm(inputs, training)
    logits = tf.layers.dense(inputs, self.num_classes, use_bias=self.use_bias)

    return logits
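

# A minimal usage sketch (an illustrative addition, not part of the original
# file). The hyperparameters and the 161-bin spectrogram input shape are
# assumptions for the example, not values defined in this module.
def _example_usage():
  model = DeepSpeech2(num_rnn_layers=5, rnn_type="gru", is_bidirectional=True,
                      rnn_hidden_size=800, num_classes=29, use_bias=True)
  # Features: [batch, time, frequency, channels].
  features = tf.placeholder(tf.float32, shape=[None, None, 161, 1])
  logits = model(features, training=True)  # [batch, reduced_time, num_classes]
  return logits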