// Copyright (C) 2015  Davis E. King (davis@dlib.net)
// License: Boost Software License   See LICENSE.txt for the full license.
#undef DLIB_DNn_TRAINER_ABSTRACT_H_
#ifdef DLIB_DNn_TRAINER_ABSTRACT_H_

#include "core_abstract.h"
#include "solvers_abstract.h"
#include <vector>
#include <chrono>


namespace dlib
{

// ----------------------------------------------------------------------------------------

    template <
        typename net_type, 
        typename solver_type = sgd
        >
    class dnn_trainer
    {
        /*!
            REQUIREMENTS ON net_type
                - net_type is an add_loss_layer object.

            REQUIREMENTS ON solver_type
                - solver_type is an implementation of the EXAMPLE_SOLVER interface defined
                  in solvers_abstract.h

            WHAT THIS OBJECT REPRESENTS
                This object is a tool for training a deep neural network. To use it you supply
                a neural network type and a solver, then you call train() with your
                training data and it will output a new network instance that has hopefully
                learned something useful from your training data.

        !*/
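
        /*!
            EXAMPLE USAGE
                A minimal sketch, not part of the formal interface above.  The
                my_net_type, samples, and labels names are hypothetical; any
                add_loss_layer network and matching training data will do, and the
                solver can be any EXAMPLE_SOLVER implementation:

                    my_net_type net;
                    dnn_trainer<my_net_type> trainer(net, sgd());
                    trainer.set_mini_batch_size(128);
                    trainer.be_verbose();
                    // samples/labels hold training data appropriate for my_net_type.
                    trainer.train(samples, labels);
                    // net now holds the trained parameters.
        !*/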

    public:

        typedef typename net_type::label_type label_type;
        typedef typename net_type::input_type input_type;
        const static size_t num_layers = net_type::num_layers;

        dnn_trainer() = delete;
        dnn_trainer(const dnn_trainer&) = delete;

        dnn_trainer(
            net_type& net, 
            const solver_type& solver = solver_type()
        ); 
        /*!
            ensures
                - &#get_net() == &net 
                  (i.e. The dnn_trainer holds a reference to net, it does not copy it.
                  Therefore, you must ensure net has a lifetime at least as long as the
                  dnn_trainer).
                - #get_solvers() == a set of solvers that are all initialized with the
                  provided solver instance.
                - #get_max_num_epochs() == 10000
                - #get_mini_batch_size() == 128
                - #get_step_size() == 1
                - #get_min_step_size() == 1e-3
                - #get_iterations_without_progress_threshold() == 2000
                - #get_step_size_shrink() == 0.1
        !*/

        net_type& get_net (
        ) const; 
        /*!
            ensures
                - returns the neural network object used by this trainer.  This is the
                  network that is optimized when you call train() or train_one_step().
                  Recall that the dnn_trainer doesn't contain the net_type object but
                  simply holds a reference to an external network which was provided to the
                  dnn_trainer's constructor.
        !*/

        void set_solver (
            const solver_type& solver
        );
        /*!
            ensures
                - assigns solver to all the solvers in this object. I.e.  solver will be
                  assigned to each element in get_solvers(). 
        !*/

        const std::vector<solver_type>& get_solvers (
        ) const; 
        /*!
            ensures
                - returns the solvers used to optimize each layer of the neural network
                  get_net().  In particular, the first layer's solver is
                  get_solvers()[0], the second layer's solver is
                  get_solvers()[1], and so on.
        !*/

        std::vector<solver_type>& get_solvers (
        ); 
        /*!
            ensures
                - returns the solvers used to optimize each layer of the neural network
                  get_net().  In particular, the first layer's solver is
                  get_solvers()[0], the second layer's solver is
                  get_solvers()[1], and so on.
                - It should be noted that you should never change the number of elements in
                  the vector returned by get_solvers() (i.e. don't do something that
                  changes get_solvers().size()).  It will be set to net_type::num_layers by
                  this object and you should leave it at that.  The non-const version of
                  get_solvers() is provided only so you can tweak the parameters of a
                  particular solver.
        !*/
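
        /*!
            EXAMPLE
                A sketch of tweaking one layer's solver via this non-const
                overload.  Configure my_solver however your solver type allows;
                index 0 selects the first layer:

                    solver_type my_solver;   // e.g. an sgd instance set up as desired
                    trainer.get_solvers()[0] = my_solver;
        !*/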

        unsigned long get_mini_batch_size (
        ) const; 
        /*!
            ensures
                - During training, we call the network's update() routine over and over
                  with training data.  The number of training samples we give to each call
                  to update is the "mini-batch size", which is defined by
                  get_mini_batch_size().
        !*/

        void set_mini_batch_size (
            unsigned long batch_size 
        );
        /*!
            requires
                - batch_size > 0
            ensures
                - #get_mini_batch_size() == batch_size
        !*/

        unsigned long get_max_num_epochs (
        ) const; 
        /*!
            ensures
                - train() will execute at most get_max_num_epochs() iterations over the
                  training data before returning.
        !*/

        void set_max_num_epochs (
            unsigned long num
        );
        /*!
            requires
                - num > 0
            ensures
                - #get_max_num_epochs() == num
        !*/

        void set_step_size (
            double ss
        );
        /*!
            requires
                - ss > 0
            ensures
                - #get_step_size() == ss
        !*/

        double get_step_size(
        ) const;
        /*!
            ensures
                - During each training step, a solver tells us how to modify the parameters
                  of each layer in the network.  It does this by outputting a step vector
                  that, when added to the parameters, will hopefully result in improved
                  network performance.  In our case, during each step, we multiply the
                  step vector from the solver by get_step_size() before adding it to the
                  parameters.  Therefore, get_step_size() controls the "learning rate" used
                  during training. 
        !*/

        void set_min_step_size (
            double ss
        );
        /*!
            requires
                - ss > 0
            ensures
                - #get_min_step_size() == ss
        !*/

        double get_min_step_size (
        ) const;
        /*!
            ensures
                - During training, this object will test if progress is still being made
                  and if it isn't then it will reduce get_step_size() by setting it to
                  get_step_size()*get_step_size_shrink().  However, it will not reduce it
                  below get_min_step_size().  Once this minimum step size is crossed the
                  training will terminate.
        !*/

        void set_iterations_without_progress_threshold (
            unsigned long thresh 
        );
        /*!
            ensures
                - #get_iterations_without_progress_threshold() == thresh
        !*/

        unsigned long get_iterations_without_progress_threshold (
        ) const;
        /*!
            ensures
                - This object monitors the progress of training and estimates if the
                  training error is being reduced.  It does this by looking at the previous
                  get_iterations_without_progress_threshold() mini-batch results and
                  applying the statistical test defined by the running_gradient object to
                  see if the training error is getting smaller.  If it isn't being reduced
                  then get_step_size() is made smaller by a factor of get_step_size_shrink().

                  Therefore, get_iterations_without_progress_threshold() should always be
                  set to something sensibly large so that this test can be done with
                  reasonably high confidence.  Think of this test as saying "if the loss
                  hasn't been reduced over the previous
                  get_iterations_without_progress_threshold() mini-batches then shrink
                  the step size".
        !*/

        void set_step_size_shrink_amount (
            double shrink
        );
        /*!
            requires
                - 0 < shrink && shrink <= 1
            ensures
                - #get_step_size_shrink() == shrink
        !*/

        double get_step_size_shrink (
        ) const;
        /*!
            ensures
                - Whenever the training routine thinks it isn't making progress anymore it
                  will reduce get_step_size() by multiplying it by get_step_size_shrink().
        !*/
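
        /*!
            EXAMPLE
                A sketch of a typical step size schedule built from the setters
                above; the particular numbers are illustrative, not recommendations:

                    trainer.set_step_size(0.1);
                    trainer.set_min_step_size(1e-4);
                    trainer.set_step_size_shrink_amount(0.1);
                    trainer.set_iterations_without_progress_threshold(2000);
                    // Whenever the loss stalls for 2000 mini-batches the step
                    // size is multiplied by 0.1, and training terminates once it
                    // would drop below 1e-4.
        !*/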

        void be_verbose (
        );
        /*!
            ensures
                - This object will print status messages to standard out so that a 
                  user can observe the progress of the algorithm.
        !*/

        void be_quiet (
        );
        /*!
            ensures
                - This object will not print anything to standard out
        !*/

        void set_synchronization_file (
            const std::string& filename,
            std::chrono::seconds time_between_syncs = std::chrono::minutes(15)
        );
        /*!
            ensures
                - While training is running, either via train() or repeated calls to
                  train_one_step(), this object will save its entire state, including the
                  state of get_net(), to disk in the file named filename every
                  time_between_syncs seconds.
                - if the filename file already exists then the state of this trainer will
                  be loaded from that file by this call to set_synchronization_file().
                  This allows you to resume a training session which was previously
                  interrupted.
        !*/
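
        /*!
            EXAMPLE
                A sketch of periodic checkpointing; the filename is arbitrary:

                    trainer.set_synchronization_file("trainer_state.dat",
                                                     std::chrono::minutes(5));
                    // If trainer_state.dat already exists, the trainer's state is
                    // loaded from it right now, so rerunning the program resumes
                    // the interrupted training session.
        !*/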

        void train (
            const std::vector<input_type>& data,
            const std::vector<label_type>& labels 
        ); 
        /*!
            requires
                - data.size() == labels.size()
                - net_type uses a supervised loss.  
                  i.e. net_type::label_type != no_label_type.
            ensures
                - Trains a supervised neural network based on the given training data.
                  The goal of training is to find the network parameters that minimize
                  get_net().compute_loss(data.begin(), data.end(), labels.begin()). 
                - The optimizer will run until get_step_size() < get_min_step_size() or
                  get_max_num_epochs() training epochs have been executed.
                - Each layer in the network will be optimized by its corresponding solver
                  in get_solvers().  
                - Each call to train DOES NOT reinitialize the state of get_net() or
                  get_solvers().  That is, the existing state of the solvers and network is
                  the starting point for the optimization each time train() is called.  In
                  particular, if you use the set_synchronization_file() method you can
                  resume an interrupted train() call by simply calling train() again and it
                  will pick up from the last synchronization point.  
                - You can obtain the average loss value during the final training epoch by
                  calling get_average_loss().
        !*/

        void train (
            const std::vector<input_type>& data
        );
        /*!
            requires 
                - net_type uses an unsupervised loss.  
                  i.e. net_type::label_type == no_label_type.
            ensures
                - Trains an unsupervised neural network based on the given training data.
                  The goal of training is to find the network parameters that minimize
                  get_net().compute_loss(data.begin(), data.end()). 
                - The optimizer will run until get_step_size() < get_min_step_size() or
                  get_max_num_epochs() training epochs have been executed.
                - Each layer in the network will be optimized by its corresponding solver
                  in get_solvers().  
                - Each call to train DOES NOT reinitialize the state of get_net() or
                  get_solvers().  That is, the existing state of the solvers and network is
                  the starting point for the optimization each time train() is called.  In
                  particular, if you use the set_synchronization_file() method you can
                  resume an interrupted train() call by simply calling train() again and it
                  will pick up from the last synchronization point.  
                - You can obtain the average loss value during the final training epoch by
                  calling get_average_loss().
        !*/

        void train_one_step (
            const std::vector<input_type>& data,
            const std::vector<label_type>& labels 
        );
        /*!
            requires
                - data.size() == labels.size()
                - net_type uses a supervised loss.  
                  i.e. net_type::label_type != no_label_type.
            ensures
                - Performs one stochastic gradient update step based on the mini-batch of
                  data and labels supplied to this function.  In particular, calling
                  train_one_step() in a loop is equivalent to calling the train() method
                  defined above.  However, train_one_step() allows you to stream data from
                  disk into the training process while train() requires you to first load
                  all the training data into RAM.  Otherwise, these training methods are
                  equivalent.
                - You can observe the current average loss value by calling get_average_loss().
        !*/
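
        /*!
            EXAMPLE
                A sketch of streaming training with train_one_step().  The
                load_next_mini_batch() helper is hypothetical; it stands for
                whatever code reads the next chunk of training data from disk:

                    std::vector<input_type> data;
                    std::vector<label_type> labels;
                    while (trainer.get_step_size() >= trainer.get_min_step_size())
                    {
                        load_next_mini_batch(data, labels);
                        trainer.train_one_step(data, labels);
                    }
                    std::cout << "avg loss: " << trainer.get_average_loss() << std::endl;
        !*/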

        void train_one_step (
            const std::vector<input_type>& data
        );
        /*!
            requires
                - net_type uses an unsupervised loss.  
                  i.e. net_type::label_type == no_label_type.
            ensures
                - Performs one stochastic gradient update step based on the mini-batch of
                  data supplied to this function.  In particular, calling train_one_step()
                  in a loop is equivalent to calling the train() method defined above.
                  However, train_one_step() allows you to stream data from disk into the
                  training process while train() requires you to first load all the
                  training data into RAM.  Otherwise, these training methods are
                  equivalent.
                - You can observe the current average loss value by calling get_average_loss().
        !*/

        double get_average_loss (
        ) const;
        /*!
            ensures
                - returns the average loss value observed during previous calls to
                  train_one_step() or train().  That is, the average output of
                  net_type::update() during the previous mini-batch updates.
        !*/

        void clear_average_loss (
        );
        /*!
            ensures
                - #get_average_loss() == 0
                - get_average_loss() uses a dlib::running_stats object to keep a running
                  average of the loss values seen during the previous mini-batch updates
                  applied during training.  Calling clear_average_loss() resets the
                  running_stats object so it forgets about all previous loss values
                  observed.
        !*/

    };

// ----------------------------------------------------------------------------------------

}

#endif // DLIB_DNn_TRAINER_ABSTRACT_H_