// Copyright (C) 2015  Davis E. King (davis@dlib.net)
// License: Boost Software License   See LICENSE.txt for the full license.
#undef DLIB_DNn_TRAINER_ABSTRACT_H_
#ifdef DLIB_DNn_TRAINER_ABSTRACT_H_

#include "core_abstract.h"
#include "solvers_abstract.h"
#include <vector>


namespace dlib
{

// ----------------------------------------------------------------------------------------

    template <
        typename net_type, 
        typename solver_type = sgd
        >
    class dnn_trainer
    {
        /*!
            REQUIREMENTS ON net_type
                - net_type is an add_loss_layer object.

            REQUIREMENTS ON solver_type
                - solver_type is an implementation of the EXAMPLE_SOLVER interface defined
                  in solvers_abstract.h

            WHAT THIS OBJECT REPRESENTS
                This object is a tool for training a deep neural network. To use it you supply
                a neural network type and a solver, then you call train() with your
                training data and it will output a new network instance that has hopefully
                learned something useful from your training data.

        !*/

    public:

        typedef typename net_type::label_type label_type;
        typedef typename net_type::input_type input_type;

        dnn_trainer(
        );
        /*!
            ensures
                - #get_net() == a default initialized net_type object.
                - #get_solvers() == a set of default initialized solvers.
                - #get_max_num_epochs() == 10000
                - #get_mini_batch_size() == 128
                - #get_step_size() == 1
                - #get_min_step_size() == 1e-4
                - #get_iterations_between_step_size_adjust() == 2000
                - #get_step_size_shrink() == 0.1
        !*/

        explicit dnn_trainer(
            const net_type& net
        );
        /*!
            ensures
                - #get_net() == net 
                - #get_solvers() == a set of default initialized solvers.
                - #get_max_num_epochs() == 10000
                - #get_mini_batch_size() == 128
                - #get_step_size() == 1
                - #get_min_step_size() == 1e-4
                - #get_iterations_between_step_size_adjust() == 2000
                - #get_step_size_shrink() == 0.1
        !*/

        dnn_trainer(
            const net_type& net, 
            const solver_type& solver
        ); 
        /*!
            ensures
                - #get_net() == net 
                - #get_solvers() == a set of solvers that are all initialized with the
                  provided solver instance.
                - #get_max_num_epochs() == 10000
                - #get_mini_batch_size() == 128
                - #get_step_size() == 1
                - #get_min_step_size() == 1e-4
                - #get_iterations_between_step_size_adjust() == 2000
                - #get_step_size_shrink() == 0.1
        !*/

        const net_type& get_net (
        ) const; 
        /*!
            ensures
                - returns the neural network object in this trainer.  This is the network
                  that is optimized when you call train().
        !*/

        void set_net (
            const net_type& net
        ); 
        /*!
            ensures
                - #get_net() == net
        !*/

        void set_solver (
            const solver_type& solver
        );
        /*!
            ensures
                - assigns solver to all the solvers in this object. I.e.  solver will be
                  assigned to each element in get_solvers(). 
        !*/

        const std::vector<solver_type>& get_solvers (
        ) const; 
        /*!
            ensures
                - returns the solvers used to optimize each layer of the neural network
                  get_net().  In particular, the first layer's solver is
                  get_solvers()[0], the second layer's solver is
                  get_solvers()[1], and so on.
        !*/

        std::vector<solver_type>& get_solvers (
        ); 
        /*!
            ensures
                - returns the solvers used to optimize each layer of the neural network
                  get_net().  In particular, the first layer's solver is
                  get_solvers()[0], the second layer's solver is
                  get_solvers()[1], and so on.
                - Note that you should never change the number of elements in the vector
                  returned by get_solvers() (i.e. don't do anything that changes
                  get_solvers().size()).  It will be set to net_type::num_layers by this
                  object and you should leave it at that.  The non-const version of
                  get_solvers() is provided only so you can tweak the parameters of a
                  particular solver.
        !*/
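
        /*
            For example, this is how you might tweak a single layer's solver
            without changing the size of the vector.  This is only a sketch:
            trainer is a dnn_trainer instance, and it relies on solver_type
            being default constructible, which must hold since this object
            default initializes its solvers:

                std::vector<solver_type>& solvers = trainer.get_solvers();
                // Replace the first layer's solver with a freshly constructed
                // one.  We assign into the existing element rather than
                // resizing the vector.
                solvers[0] = solver_type();
        */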

        unsigned long get_mini_batch_size (
        ) const; 
        /*!
            ensures
                - During training, we call the network's update() routine over and over
                  with training data.  The number of training samples we give to each call
                  to update is the "mini-batch size", which is defined by
                  get_mini_batch_size().
        !*/

        void set_mini_batch_size (
            unsigned long batch_size 
        );
        /*!
            requires
                - batch_size > 0
            ensures
                - #get_mini_batch_size() == batch_size
        !*/

        unsigned long get_max_num_epochs (
        ) const; 
        /*!
            ensures
                - train() will execute at most get_max_num_epochs() iterations over the
                  training data before returning.
        !*/

        void set_max_num_epochs (
            unsigned long num
        );
        /*!
            requires
                - num > 0
            ensures
                - #get_max_num_epochs() == num
        !*/

        void set_step_size (
            double ss
        );
        /*!
            requires
                - ss > 0
            ensures
                - #get_step_size() == ss
        !*/

        double get_step_size(
        ) const;
        /*!
            ensures
                - During each training step, a solver tells us how to modify the parameters
                  of each layer in the network.  It does this by outputting a step vector
                  that, when added to the parameters, will hopefully result in improved
                  network performance.  In our case, during each step, we multiply the
                  step vector from the solver by get_step_size() before adding it to the
                  parameters.  Therefore, get_step_size() controls the "learning rate" used
                  during training. 
        !*/
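
        /*
            Conceptually, the update applied to each layer's parameters during
            a single training step looks like the following sketch (illustrative
            pseudocode, not the literal implementation):

                // solver_step is the step vector output by that layer's solver.
                params += get_step_size() * solver_step;
        */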

        void set_min_step_size (
            double ss
        );
        /*!
            requires
                - ss > 0
            ensures
                - #get_min_step_size() == ss
        !*/

        double get_min_step_size (
        ) const;
        /*!
            ensures
                - During training, this object will test if progress is still being made
                  and, if it isn't, it will reduce get_step_size() by setting it to
                  get_step_size()*get_step_size_shrink().  However, it will not reduce it
                  below get_min_step_size().  Once this minimum step size is crossed,
                  training will terminate.
        !*/

        void set_iterations_between_step_size_adjust (
            unsigned long min_iter
        );
        /*!
            ensures
                - #get_iterations_between_step_size_adjust() == min_iter
        !*/

        unsigned long get_iterations_between_step_size_adjust (
        ) const;
        /*!
            ensures
                - This object monitors the progress of training and estimates if the
                  training error is being reduced.  It does this by looking at
                  get_iterations_between_step_size_adjust() mini-batch results and applying
                  the statistical test defined by the running_gradient object to see if the
                  training error is getting smaller.  

                  Therefore, get_iterations_between_step_size_adjust() should always be set
                  to something sensibly large so that this test can be done with reasonably
                  high confidence.
        !*/

        void set_step_size_shrink_amount (
            double shrink
        );
        /*!
            requires
                - 0 < shrink && shrink <= 1
            ensures
                - #get_step_size_shrink() == shrink
        !*/

        double get_step_size_shrink (
        ) const;
        /*!
            ensures
                - Whenever the training routine thinks it isn't making progress anymore it
                  will reduce get_step_size() by multiplying it by get_step_size_shrink().
        !*/
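
        /*
            Putting the step size controls together, a typical configuration
            might look like the following sketch, where net is a net_type
            object defined elsewhere and the specific values are illustrative
            only:

                dnn_trainer<net_type> trainer(net);
                trainer.set_step_size(0.1);
                trainer.set_min_step_size(1e-5);
                trainer.set_step_size_shrink_amount(0.1);
                trainer.set_iterations_between_step_size_adjust(2000);

            With these settings, the trainer tests for progress every 2000
            mini-batches, multiplies the step size by 0.1 whenever progress
            stalls, and terminates once the step size falls below 1e-5.
        */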

        void be_verbose (
        );
        /*!
            ensures
                - This object will print status messages to standard out so that a 
                  user can observe the progress of the algorithm.
        !*/

        void be_quiet (
        );
        /*!
            ensures
                - This object will not print anything to standard out
        !*/

        const net_type& train (
            const std::vector<input_type>& data,
            const std::vector<label_type>& labels 
        ); 
        /*!
            requires
                - data.size() == labels.size()
                - net_type uses a supervised loss.  
                  i.e. net_type::label_type != no_label_type.
            ensures
                - Trains a supervised neural network based on the given training data.
                  The goal of training is to find the network parameters that minimize
                  get_net().compute_loss(data.begin(), data.end(), labels.begin()). 
                - The optimizer will run until get_step_size() < get_min_step_size() or
                  get_max_num_epochs() training epochs have been executed. 
                - Each layer in the network will be optimized by its corresponding solver
                  in get_solvers().  
                - returns #get_net()
                  (i.e. the trained network can also be accessed by calling get_net() after
                  train() finishes executing)
                - Each call to train DOES NOT reinitialize the state of get_net() or
                  get_solvers().  That is, the state of the solvers and network contained
                  inside this trainer is the starting point for the optimization each time
                  train() is called.  For example, calling train() 1 time and having it
                  execute 100 epochs of training is equivalent to calling train() 10 times
                  and having it execute 10 epochs of training during each call.  This also
                  means you can serialize a trainer to disk and then, at a later date,
                  deserialize it and resume training your network where you left off.
                - You can obtain the average loss value during the final training epoch by
                  calling get_average_loss().
        !*/
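
        /*
            A minimal end-to-end sketch of supervised training.  Here
            my_net_type, my_data, and my_labels are hypothetical and assumed to
            be defined elsewhere, with my_net_type being an add_loss_layer that
            uses a supervised loss:

                my_net_type net;
                dnn_trainer<my_net_type> trainer(net);
                trainer.set_mini_batch_size(128);
                trainer.be_verbose();
                trainer.train(my_data, my_labels);
                // The trained network is now available via trainer.get_net().
        */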

        const net_type& train (
            const std::vector<input_type>& data
        );
        /*!
            requires 
                - net_type uses an unsupervised loss.  
                  i.e. net_type::label_type == no_label_type.
            ensures
                - Trains an unsupervised neural network based on the given training data.
                  The goal of training is to find the network parameters that minimize
                  get_net().compute_loss(data.begin(), data.end()). 
                - The optimizer will run until get_step_size() < get_min_step_size() or
                  get_max_num_epochs() training epochs have been executed. 
                - Each layer in the network will be optimized by its corresponding solver
                  in get_solvers().  
                - returns #get_net()
                  (i.e. the trained network can also be accessed by calling get_net() after
                  train() finishes executing)
                - Each call to train DOES NOT reinitialize the state of get_net() or
                  get_solvers().  That is, the state of the solvers and network contained
                  inside this trainer is the starting point for the optimization each time
                  train() is called.  For example, calling train() 1 time and having it
                  execute 100 epochs of training is equivalent to calling train() 10 times
                  and having it execute 10 epochs of training during each call.  This also
                  means you can serialize a trainer to disk and then, at a later date,
                  deserialize it and resume training your network where you left off.
                - You can obtain the average loss value during the final training epoch by
                  calling get_average_loss().
        !*/

        void train_one_step (
            const std::vector<input_type>& data,
            const std::vector<label_type>& labels 
        );
        /*!
            requires
                - data.size() == labels.size()
                - net_type uses a supervised loss.  
                  i.e. net_type::label_type != no_label_type.
            ensures
                - Performs one stochastic gradient update step based on the mini-batch of
                  data and labels supplied to this function.  In particular, calling
                  train_one_step() in a loop is equivalent to calling the train() method
                  defined above.  However, train_one_step() allows you to stream data from
                  disk into the training process while train() requires you to first load
                  all the training data into RAM.  Otherwise, these training methods are
                  equivalent.
                - You can observe the current average loss value by calling get_average_loss().
        !*/
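
        /*
            For example, a streaming training loop might look like the
            following sketch, where load_next_mini_batch() is a hypothetical
            helper that fills data and labels from disk and returns false once
            the stream is exhausted:

                std::vector<input_type> data;
                std::vector<label_type> labels;
                while (load_next_mini_batch(data, labels))
                {
                    trainer.train_one_step(data, labels);
                    std::cout << "average loss: " << trainer.get_average_loss() << std::endl;
                }
                // Call trainer.clear_average_loss() if you want the average to
                // start fresh, e.g. at epoch boundaries.
        */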

        void train_one_step (
            const std::vector<input_type>& data
        );
        /*!
            requires
                - net_type uses an unsupervised loss.  
                  i.e. net_type::label_type == no_label_type.
            ensures
                - Performs one stochastic gradient update step based on the mini-batch of
                  data supplied to this function.  In particular, calling train_one_step()
                  in a loop is equivalent to calling the train() method defined above.
                  However, train_one_step() allows you to stream data from disk into the
                  training process while train() requires you to first load all the
                  training data into RAM.  Otherwise, these training methods are
                  equivalent.
                - You can observe the current average loss value by calling get_average_loss().
        !*/

        double get_average_loss (
        ) const;
        /*!
            ensures
                - returns the average loss value observed during previous calls to
                  train_one_step() or train().  That is, the average output of
                  net_type::update() during the previous mini-batch updates.
        !*/

        void clear_average_loss (
        );
        /*!
            ensures
                - #get_average_loss() == 0
                - get_average_loss() uses a dlib::running_stats object to keep a running
                  average of the loss values seen during the previous mini-batch updates
                  applied during training.  Calling clear_average_loss() resets the
                  running_stats object so it forgets about all previous loss values
                  observed.
        !*/

    };

    template <typename T, typename U>
    void serialize(const dnn_trainer<T,U>& item, std::ostream& out);
    template <typename T, typename U>
    void deserialize(dnn_trainer<T,U>& item, std::istream& in);
    /*!
        provides serialization support  
    !*/
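
    /*
        For example, this is how you might checkpoint a trainer to disk and
        later resume training where you left off (the file name is
        illustrative):

            std::ofstream fout("trainer_state.dat", std::ios::binary);
            serialize(trainer, fout);
            fout.close();

            // ... possibly in a different program run ...
            dnn_trainer<net_type> trainer2;
            std::ifstream fin("trainer_state.dat", std::ios::binary);
            deserialize(trainer2, fin);
    */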

// ----------------------------------------------------------------------------------------

}

#endif // DLIB_DNn_TRAINER_ABSTRACT_H_