carnelinet_384.yaml 9.85 KB
Newer Older
wxj's avatar
wxj committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
# This config describes a CarneliNet model with 384 filters (CarneliNet-384) with CTC loss and word-piece tokenizer.
# The values in this config have been tested on the LibriSpeech dataset with an effective batch size of 1K, trained on 32 GPUs with AMP enabled.

# Larger and smaller models, e.g. CarneliNet-1024, can reuse the same architecture and config file. However, for larger model
# SpecAugment regularization is stronger and number of filters in epilog layer is also increased. Specifically for LibriSpeech dataset
# activation function and weight initialization policy also change for the largest of models. If a training dataset is much larger than
# LibriSpeech, Swish and tds_uniform parameters may show improvements even for CarneliNet-1024.
#
# The changes between models are captured in the following table. The rest of the parameters are the same across all models.
# +-----------------+---------+---------+------------+----------------+------------+
# | Model           | filters | encoder | activation | weight         | time_masks |
# |                 |         | final   |            | init_mode      |            |
# |                 |         | filters |            |                |            |
# +-----------------+---------+---------+------------+----------------+------------+
# | CarneliNet-256  | 256     | 640     | Swish      | tds_uniform    | 2          |
# +-----------------+---------+---------+------------+----------------+------------+
# | CarneliNet-384  | 384     | 640     | Swish      | tds_uniform    | 2          |
# +-----------------+---------+---------+------------+----------------+------------+
# | CarneliNet-512  | 512     | 640     | Swish      | tds_uniform    | 10         |
# +-----------------+---------+---------+------------+----------------+------------+
# | CarneliNet-768  | 768     | 1024    | ReLU       | xavier_uniform | 10         |
# +-----------------+---------+---------+------------+----------------+------------+
# | CarneliNet-1024 | 1024    | 1024    | ReLU       | xavier_uniform | 10         |
# +-----------------+---------+---------+------------+----------------+------------+
name: &name "CarneliNet-384-8x-Stride"  # anchor reused by exp_manager.name

model:
  # Audio sample rate in Hz; anchor reused by the dataset and preprocessor sections.
  sample_rate: &sample_rate 16000

  train_ds:
    manifest_filepath: ???  # required: path(s) to training manifest(s)
    # Reuse the model-level anchor instead of repeating the literal 16000,
    # so the rate has a single source of truth (preprocessor already aliases it).
    sample_rate: *sample_rate
    batch_size: 32  # per GPU; 32 GPUs x 32 = the ~1K effective batch referenced in the header
    trim_silence: false
    use_start_end_token: false
    max_duration: 16.7  # drop training utterances longer than this many seconds
    shuffle: true
    num_workers: 8
    pin_memory: true
    # tarred datasets
    is_tarred: false
    tarred_audio_filepaths: null
    shuffle_n: 2048  # shuffle buffer size, used only when is_tarred is true
    # bucketing params
    bucketing_strategy: "synced_randomized"
    bucketing_batch_size: null


  validation_ds:
    manifest_filepath: ???  # required: path(s) to validation manifest(s)
    # Reuse the model-level anchor instead of repeating the literal 16000.
    sample_rate: *sample_rate
    batch_size: 32
    shuffle: false
    use_start_end_token: false
    num_workers: 8
    pin_memory: true

  test_ds:
    manifest_filepath: null  # optional: set to run evaluation on a test set
    # Reuse the model-level anchor instead of repeating the literal 16000.
    sample_rate: *sample_rate
    batch_size: 32
    shuffle: false
    use_start_end_token: false
    num_workers: 8
    pin_memory: true

  # Shared hyperparameters interpolated throughout the encoder section below
  # via ${model.model_defaults.*}.
  model_defaults:
    repeat: 5  # sub-blocks per residual block
    dropout: 0.1
    separable: true  # depthwise-separable convolutions
    se: true  # squeeze-and-excite
    se_context_size: -1  # -1 presumably means global context -- confirm against the SE module
    se_repeat: false
    kernel_size: 11
    filters: 384  # see the model-size table in the file header for other variants
    encoder_final_filters: 640  # width of the epilog encoder layer (table column "encoder final filters")

  # Sub-word tokenizer (the header describes this config as CTC + word-piece).
  tokenizer:
    dir: ???  # required: path to directory which contains either tokenizer.model (bpe) or vocab.txt (for wpe)
    type: ???  # required: can be either bpe or wpe

  # Log-mel spectrogram front end.
  preprocessor:
    _target_: nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor
    sample_rate: *sample_rate
    normalize: "per_feature"
    window_size: 0.025  # 25 ms analysis window
    window_stride: 0.01  # 10 ms hop
    window: "hann"
    features: &n_mels 80  # mel bins; anchor reused as encoder feat_in
    n_fft: 512
    frame_splicing: 1
    dither: 0.00001
    pad_to: 16
    stft_conv: false

  # SpecAugment regularization applied to the spectrogram during training.
  spec_augment:
    freq_masks: 2
    _target_: nemo.collections.asr.modules.SpectrogramAugmentation
    time_masks: 2  # per the header table: 2 for CarneliNet-256/384, 10 for larger models
    freq_width: 27
    time_width: 0.05  # fraction of utterance length -- adaptive mask width

  encoder:
    _target_: nemo.collections.asr.modules.ParallelConvASREncoder
    feat_in: *n_mels
    # Per the header table: swish + tds_uniform for CarneliNet-256/384/512,
    # relu + xavier_uniform for the 768/1024 variants.
    activation: swish
    init_mode: tds_uniform
    conv_mask: true

    # Layout: prolog conv -> (stride-2 block + parallel block) x3 -> epilog conv.
    # The three stride-2 blocks give the 8x total time stride in the model name.
    jasper:
      # prolog: single non-residual conv block
      - filters: ${model.model_defaults.filters}
        repeat: 1
        kernel: [5]
        stride: [1]
        dilation: [1]
        dropout: 0.0
        residual: false
        separable: ${model.model_defaults.separable}
        se: ${model.model_defaults.se}
        se_context_size: ${model.model_defaults.se_context_size}
        se_repeat: ${model.model_defaults.se_repeat}

      # downsampling block 1: stride 2, residual carried through the strided path
      - filters: ${model.model_defaults.filters}
        repeat: ${model.model_defaults.repeat}
        kernel: [5]
        stride: [2]
        dilation: [1]
        dropout: ${model.model_defaults.dropout}
        residual: true
        separable: ${model.model_defaults.separable}
        se: ${model.model_defaults.se}
        se_context_size: ${model.model_defaults.se_context_size}
        se_repeat: ${model.model_defaults.se_repeat}
        stride_last: true
        residual_mode: "stride_add"

      # parallel block 1: 5-entry kernel list -- presumably one entry per
      # parallel column; confirm against ParallelConvASREncoder.
      - filters: ${model.model_defaults.filters}
        repeat: ${model.model_defaults.repeat}
        kernel: ["${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}"]
        stride: [1]
        dilation: [1]
        dropout: ${model.model_defaults.dropout}
        residual: true
        separable: ${model.model_defaults.separable}
        se: ${model.model_defaults.se}
        se_context_size: ${model.model_defaults.se_context_size}
        se_repeat: ${model.model_defaults.se_repeat}

      # downsampling block 2: stride 2
      - filters: ${model.model_defaults.filters}
        repeat: ${model.model_defaults.repeat}
        kernel: [7]
        stride: [2]  # *stride
        dilation: [1]
        dropout: ${model.model_defaults.dropout}
        residual: true
        separable: ${model.model_defaults.separable}
        se: ${model.model_defaults.se}
        se_context_size: ${model.model_defaults.se_context_size}
        se_repeat: ${model.model_defaults.se_repeat}
        stride_last: true
        residual_mode: "stride_add"

      # parallel block 2: 6 columns
      - filters: ${model.model_defaults.filters}
        repeat: ${model.model_defaults.repeat}
        kernel: ["${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}"]
        stride: [1]
        dilation: [1]
        dropout: ${model.model_defaults.dropout}
        residual: true
        separable: ${model.model_defaults.separable}
        se: ${model.model_defaults.se}
        se_context_size: ${model.model_defaults.se_context_size}
        se_repeat: ${model.model_defaults.se_repeat}

      # downsampling block 3: stride 2
      - filters: ${model.model_defaults.filters}
        repeat: ${model.model_defaults.repeat}
        kernel: [13]
        stride: [2]  # stride
        dilation: [1]
        dropout: ${model.model_defaults.dropout}
        residual: true
        separable: ${model.model_defaults.separable}
        se: ${model.model_defaults.se}
        se_context_size: ${model.model_defaults.se_context_size}
        se_repeat: ${model.model_defaults.se_repeat}
        stride_last: true
        residual_mode: "stride_add"

      # parallel block 3: 7 columns
      - filters: ${model.model_defaults.filters}
        repeat: ${model.model_defaults.repeat}
        kernel: ["${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}","${model.model_defaults.kernel_size}"]
        stride: [1]
        dilation: [1]
        dropout: ${model.model_defaults.dropout}
        residual: true
        separable: ${model.model_defaults.separable}
        se: ${model.model_defaults.se}
        se_context_size: ${model.model_defaults.se_context_size}
        se_repeat: ${model.model_defaults.se_repeat}

      # epilog: single non-residual block widened to encoder_final_filters
      - filters: ${model.model_defaults.encoder_final_filters}
        repeat: 1
        kernel: ["${model.model_defaults.kernel_size}"]
        stride: [1]
        dilation: [1]
        dropout: 0.0
        residual: false
        separable: ${model.model_defaults.separable}
        se: ${model.model_defaults.se}
        se_context_size: ${model.model_defaults.se_context_size}
        se_repeat: ${model.model_defaults.se_repeat}


  # CTC decoder head projecting encoder output (header: "with CTC loss").
  decoder:
    _target_: nemo.collections.asr.modules.ConvASRDecoder
    feat_in: ${model.model_defaults.encoder_final_filters}
    num_classes: -1  # filled with vocabulary size from tokenizer at runtime
    vocabulary: []  # filled with vocabulary from tokenizer at runtime

  optim:
    name: novograd
    lr: 0.1

    # optimizer arguments
    betas: [0.8, 0.25]
    weight_decay: 0.001

    # scheduler setup
    sched:
      name: CosineAnnealing

      # scheduler config override
      warmup_steps: 1000
      warmup_ratio: null  # alternative to warmup_steps; only one should be set
      # "1e-5" is resolved as a *string* by YAML 1.1 loaders such as PyYAML
      # (exponent floats need a decimal point); "1.0e-5" is a float everywhere.
      min_lr: 1.0e-5
      last_epoch: -1

trainer:
  devices: 1 # number of gpus
  max_epochs: 100
  max_steps: -1 # computed at runtime if not set
  num_nodes: 1
  accelerator: gpu
  strategy: ddp
  accumulate_grad_batches: 1
  # lowercase boolean for consistency with the rest of the file (yamllint truthy canonical form)
  enable_checkpointing: false  # Provided by exp_manager
  logger: false  # Provided by exp_manager
  log_every_n_steps: 50  # Interval of logging.
  val_check_interval: 1.0 # Set to 0.25 to check 4 times per epoch, or an int for number of iterations
  precision: 32 # If AMP is available, change to 16 to gain training speed increase and lower memory consumption (preferred).
  sync_batchnorm: false
  benchmark: false # needs to be false for models with variable-length speech input as it slows down training

exp_manager:
  exp_dir: null  # null lets exp_manager choose its default output directory
  name: *name  # reuses the top-level model name anchor
  create_tensorboard_logger: true
  create_checkpoint_callback: true
  checkpoint_callback_params:
    monitor: "val_wer"  # keep checkpoints with the lowest validation word error rate
    mode: "min"
    always_save_nemo: true  # also export a .nemo file alongside .ckpt checkpoints
  create_wandb_logger: false
  wandb_logger_kwargs:
    name: null
    project: null
    entity: null
  resume_if_exists: false
  resume_ignore_no_checkpoint: false

hydra:
  run:
    dir: .  # run in the current working directory instead of hydra's timestamped output dir
  job_logging:
    root:
      handlers: null  # disable hydra's default log handlers; logging is configured by the application