// Copyright (C) 2015  Davis E. King (davis@dlib.net)
// License: Boost Software License   See LICENSE.txt for the full license.
#undef DLIB_DNn_LOSS_ABSTRACT_H_
#ifdef DLIB_DNn_LOSS_ABSTRACT_H_

#include "core_abstract.h"
#include "../image_processing/full_object_detection_abstract.h"

namespace dlib
{

// ----------------------------------------------------------------------------------------

    class EXAMPLE_LOSS_LAYER_
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                A loss layer is the final layer in a deep neural network.  It computes the
                task loss.  That is, it computes a number that tells us how well the
                network is performing on some task, such as predicting a binary label.  

                You can use one of the loss layers that comes with dlib (defined below).
                But importantly, you are able to define your own loss layers to suit your
                needs.  You do this by creating a class that defines an interface matching
                the one described by this EXAMPLE_LOSS_LAYER_ class.  Note that there is no
                dlib::EXAMPLE_LOSS_LAYER_ type.  It is shown here purely to document the
                interface that a loss layer must implement.

                A loss layer can optionally provide a to_label() method that converts the
                output of a network into a user defined type.  If to_label() is not
                provided then the operator() methods of add_loss_layer will not be
                available, but otherwise everything will function as normal.

                Finally, note that there are two broad flavors of loss layer, supervised
                and unsupervised.  The EXAMPLE_LOSS_LAYER_ as shown here is a supervised
                layer.  To make an unsupervised loss you simply leave out the
                training_label_type typedef and the truth iterator argument to
                compute_loss_value_and_gradient().
        !*/

    public:

        // In most cases training_label_type and output_label_type will be the same type.
        typedef whatever_type_you_use_for_training_labels training_label_type;
        typedef whatever_type_you_use_for_output_labels   output_label_type;

        EXAMPLE_LOSS_LAYER_ (
        );
        /*!
            ensures
                - EXAMPLE_LOSS_LAYER_ objects are default constructible.
        !*/

        EXAMPLE_LOSS_LAYER_ (
            const EXAMPLE_LOSS_LAYER_& item
        );
        /*!
            ensures
                - EXAMPLE_LOSS_LAYER_ objects are copy constructible.
        !*/

        // Implementing to_label() is optional.
        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const;
        /*!
            requires
                - SUB_TYPE implements the SUBNET interface defined at the top of
                  layers_abstract.h.
                - input_tensor was given as input to the network sub and the outputs are
                  now visible in layer<i>(sub).get_output(), for all valid i.
                - input_tensor.num_samples() > 0
                - input_tensor.num_samples()%sub.sample_expansion_factor() == 0.
                - iter == an iterator pointing to the beginning of a range of
                  input_tensor.num_samples()/sub.sample_expansion_factor() elements.  Moreover,
                  they must be output_label_type elements.
            ensures
                - Converts the output of the provided network to output_label_type objects and
                  stores the results into the range indicated by iter.  In particular, for
                  all valid i, it will be the case that:
                    *(iter+i/sub.sample_expansion_factor()) is populated based on the output of
                    sub and corresponds to the ith sample in input_tensor.
        !*/

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth, 
            SUBNET& sub
        ) const;
        /*!
            requires
                - SUBNET implements the SUBNET interface defined at the top of
                  layers_abstract.h.
                - input_tensor was given as input to the network sub and the outputs are
                  now visible in layer<i>(sub).get_output(), for all valid i.
                - input_tensor.num_samples() > 0
                - input_tensor.num_samples()%sub.sample_expansion_factor() == 0.
                - for all valid i:
                    - layer<i>(sub).get_gradient_input() has the same dimensions as
                      layer<i>(sub).get_output().
                - truth == an iterator pointing to the beginning of a range of
                  input_tensor.num_samples()/sub.sample_expansion_factor() elements.  Moreover,
                  they must be training_label_type elements.
                - for all valid i:
                    - *(truth+i/sub.sample_expansion_factor()) is the label of the ith sample in
                      input_tensor.
            ensures
                - This function computes a loss function that describes how well the output
                  of sub matches the expected labels given by truth.  Let's write the loss
                  function as L(input_tensor, truth, sub).  
                - Then compute_loss_value_and_gradient() computes the gradient of L() with
                  respect to the outputs in sub.  Specifically, compute_loss_value_and_gradient() 
                  assigns the gradients into sub by performing the following tensor
                  assignments, for all valid i: 
                    - layer<i>(sub).get_gradient_input() = the gradient of
                      L(input_tensor,truth,sub) with respect to layer<i>(sub).get_output().
                - returns L(input_tensor,truth,sub)
        !*/
    };

    std::ostream& operator<<(std::ostream& out, const EXAMPLE_LOSS_LAYER_& item);
    /*!
        Prints a string describing this layer.
    !*/

    void to_xml(const EXAMPLE_LOSS_LAYER_& item, std::ostream& out);
    /*!
        This function is optional, but required if you want to print your networks with
        net_to_xml().  It prints a layer as XML to the given output stream.
    !*/

142
143
144
145
146
147
    void serialize(const EXAMPLE_LOSS_LAYER_& item, std::ostream& out);
    void deserialize(EXAMPLE_LOSS_LAYER_& item, std::istream& in);
    /*!
        provides serialization support  
    !*/

Davis King's avatar
Davis King committed
148
149
150
151
    // For each loss layer you define, always define an add_loss_layer template so that
    // layers can be easily composed.  Moreover, the convention is that the layer class
    // ends with an _ while the add_loss_layer template has the same name but without the
    // trailing _.
    template <typename SUBNET>
    using EXAMPLE_LOSS_LAYER = add_loss_layer<EXAMPLE_LOSS_LAYER_, SUBNET>;
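
    // For illustration, below is a minimal sketch of a custom loss layer implementing
    // the interface described above.  It treats the network output as a single scalar
    // per sample and uses a squared error loss.  This class is not part of dlib, it is
    // just an example, and it assumes sub.sample_expansion_factor() == 1.  A real loss
    // layer would also provide the serialize(), deserialize(), operator<<, and to_xml()
    // functions described above.
    //
    //    class my_squared_loss_
    //    {
    //    public:
    //        typedef float training_label_type;
    //        typedef float output_label_type;
    //
    //        template <typename SUB_TYPE, typename label_iterator>
    //        void to_label (const tensor& /*input_tensor*/, const SUB_TYPE& sub, label_iterator iter) const
    //        {
    //            // Just copy the scalar network outputs into the output label range.
    //            const tensor& output = sub.get_output();
    //            const float* out_data = output.host();
    //            for (long i = 0; i < output.num_samples(); ++i)
    //                *iter++ = out_data[i];
    //        }
    //
    //        template <typename const_label_iterator, typename SUBNET>
    //        double compute_loss_value_and_gradient (const tensor& /*input_tensor*/, const_label_iterator truth, SUBNET& sub) const
    //        {
    //            const tensor& output = sub.get_output();
    //            tensor& grad = sub.get_gradient_input();
    //            const float* out_data = output.host();
    //            float* grad_data = grad.host();
    //            double loss = 0;
    //            const double scale = 1.0/output.num_samples();
    //            for (long i = 0; i < output.num_samples(); ++i)
    //            {
    //                // Average squared error over the mini-batch, and its gradient
    //                // with respect to each network output.
    //                const float err = out_data[i] - *truth++;
    //                loss += scale*err*err;
    //                grad_data[i] = 2*scale*err;
    //            }
    //            return loss;
    //        }
    //    };
    //
    //    template <typename SUBNET>
    //    using my_squared_loss = add_loss_layer<my_squared_loss_, SUBNET>;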

// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------

    class loss_binary_hinge_ 
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object implements the loss layer interface defined above by
                EXAMPLE_LOSS_LAYER_.  In particular, it implements the hinge loss, which is
                appropriate for binary classification problems.  Therefore, the possible
                labels when using this loss are +1 and -1.  Moreover, it will cause the
                network to produce outputs > 0 when predicting a member of the +1 class and
                values < 0 otherwise.
        !*/
    public:

        typedef float training_label_type;
        typedef float output_label_type;

        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
            it has the additional calling requirements that: 
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().k() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
            and the output label is the raw score for each classified object.  If the score
            is > 0 then the classifier is predicting the +1 class, otherwise it is
            predicting the -1 class.
        !*/

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth, 
            SUBNET& sub
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::compute_loss_value_and_gradient() 
            except it has the additional calling requirements that: 
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().k() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
                - all values pointed to by truth are +1 or -1.
        !*/

    };

    template <typename SUBNET>
    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUBNET>;
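
    // Example usage (a minimal sketch, not part of the dlib API; it assumes the fc,
    // relu, and input layers from layers_abstract.h, the dnn_trainer from
    // trainer_abstract.h, and that samples/labels come from your training data):
    //
    //    using net_type = loss_binary_hinge<fc<1,relu<fc<16,input<matrix<float,0,1>>>>>>;
    //
    //    std::vector<matrix<float,0,1>> samples = ...;
    //    std::vector<float> labels = ...;          // each label is +1 or -1
    //    net_type net;
    //    dnn_trainer<net_type> trainer(net);
    //    trainer.train(samples, labels);
    //    float score = net(samples[0]);            // score > 0 predicts the +1 class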

// ----------------------------------------------------------------------------------------

    class loss_binary_log_ 
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object implements the loss layer interface defined above by
                EXAMPLE_LOSS_LAYER_.  In particular, it implements the log loss, which is
                appropriate for binary classification problems.  Therefore, the possible
                labels when using this loss are +1 and -1.  Moreover, it will cause the
                network to produce outputs > 0 when predicting a member of the +1 class and
                values < 0 otherwise.

                To be more specific, this object contains a sigmoid layer followed by a 
                cross-entropy layer.  
        !*/
    public:

        typedef float training_label_type;
        typedef float output_label_type;

        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
            it has the additional calling requirements that: 
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().k() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
            and the output label is the raw score for each classified object.  If the score
            is > 0 then the classifier is predicting the +1 class, otherwise it is
            predicting the -1 class.
        !*/

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth, 
            SUBNET& sub
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::compute_loss_value_and_gradient() 
            except it has the additional calling requirements that: 
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().k() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
                - all values pointed to by truth are +1 or -1.
        !*/

    };

    template <typename SUBNET>
    using loss_binary_log = add_loss_layer<loss_binary_log_, SUBNET>;
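
    // Since loss_binary_log_ is a sigmoid followed by a cross-entropy loss, the
    // per-sample loss it computes reduces to the softplus form below, where f is the
    // network output and y is the +1/-1 label.  This is only an illustrative sketch,
    // not dlib code:
    //
    //    double binary_log_loss (double f, double y)
    //    {
    //        return std::log(1 + std::exp(-y*f));
    //    }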

// ----------------------------------------------------------------------------------------

    class loss_multiclass_log_ 
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object implements the loss layer interface defined above by
                EXAMPLE_LOSS_LAYER_.  In particular, it implements the multiclass logistic
                regression loss (i.e. negative log-likelihood loss), which is appropriate
                for multiclass classification problems.  This means that the possible
                labels when using this loss are integers >= 0.  
                
                Moreover, if after training you were to replace the loss layer of the
                network with a softmax layer, the network outputs would give the
                probabilities of each class assignment.  That is, if you have K classes
                then the network should output tensors with the tensor::k()'th dimension
                equal to K.  Applying softmax to these K values gives the probabilities of
                each class.  The index into that K dimensional vector with the highest
                probability is the predicted class label.
        !*/

    public:

        typedef unsigned long training_label_type;
        typedef unsigned long output_label_type;

        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
            it has the additional calling requirements that: 
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
            and the output label is the predicted class for each classified object.  The number
            of possible output classes is sub.get_output().k().
        !*/

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth, 
            SUBNET& sub
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::compute_loss_value_and_gradient() 
            except it has the additional calling requirements that: 
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
                - all values pointed to by truth are < sub.get_output().k()
        !*/

    };

    template <typename SUBNET>
    using loss_multiclass_log = add_loss_layer<loss_multiclass_log_, SUBNET>;
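
    // Example usage (a minimal sketch, not part of the dlib API; the layer aliases are
    // from layers_abstract.h and num_classes is a hypothetical constant):
    //
    //    const long num_classes = 10;
    //    using net_type = loss_multiclass_log<fc<num_classes,relu<fc<32,input<matrix<float,0,1>>>>>>;
    //
    //    net_type net;
    //    dnn_trainer<net_type> trainer(net);
    //    trainer.train(samples, labels);            // labels are unsigned longs in [0, num_classes)
    //    unsigned long predicted = net(samples[0]); // the most probable class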

// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------

    struct mmod_options
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object contains all the parameters that control the behavior of loss_mmod_.
        !*/

    public:

        mmod_options() = default;

        // This kind of object detector is a sliding window detector.  These two parameters
        // determine the size of the sliding window.  Since you will usually use the MMOD
        // loss with an image pyramid the detector size determines the size of the smallest
        // object you can detect.
        unsigned long detector_width = 80;
        unsigned long detector_height = 80;

        // These parameters control how we penalize different kinds of mistakes.  See 
        // Max-Margin Object Detection by Davis E. King (http://arxiv.org/abs/1502.00046)
        // for further details.
        double loss_per_false_alarm = 1;
        double loss_per_missed_target = 1;

        // A detection must have an intersection-over-union value greater than this for us
        // to consider it a match against a ground truth box.
        double truth_match_iou_threshold = 0.5;

        // When doing non-max suppression, we use overlaps_nms to decide if a box overlaps
        // an already output detection and should therefore be thrown out.
        test_box_overlap overlaps_nms = test_box_overlap(0.4);

        // Any mmod_rect in the training data that has its ignore field set to true defines
        // an "ignore zone" in an image.  Any detection from that area is totally ignored
        // by the optimizer.  Therefore, this overlaps_ignore field defines how we decide
        // if a box falls into an ignore zone.  You use these ignore zones if there are
        // objects in your dataset that you are unsure if you want to detect or otherwise
        // don't care if the detector gets them or not.  
        test_box_overlap overlaps_ignore;

        mmod_options (
            const std::vector<std::vector<mmod_rect>>& boxes,
            const unsigned long target_size = 6400
        );
        /*!
            ensures
                - This function tries to automatically set the MMOD options to reasonable
                  values, assuming you have a training dataset of boxes.size() images, where
                  the ith image contains objects boxes[i] you want to detect and the
                  objects are clearly visible when scaled so that they are target_size
                  pixels in area.
                - In particular, this function will automatically set the detector width
                  and height based on the average box size in boxes and the requested
                  target_size.
                - This function will also set the overlaps_nms tester to the most
                  restrictive tester that doesn't reject anything in boxes.
        !*/
    };

    void serialize(const mmod_options& item, std::ostream& out);
    void deserialize(mmod_options& item, std::istream& in);
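
    // For example, you can let the constructor above configure the options from your
    // training data (a sketch; boxes is a hypothetical variable holding your dataset):
    //
    //    std::vector<std::vector<mmod_rect>> boxes = ...;  // boxes[i] are the objects in the ith image
    //    mmod_options options(boxes, 6400);                // detect objects about 6400 pixels in area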

// ----------------------------------------------------------------------------------------

    class loss_mmod_ 
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object implements the loss layer interface defined above by
                EXAMPLE_LOSS_LAYER_.  In particular, it implements the Max Margin Object
                Detection loss defined in the paper:
                    Max-Margin Object Detection by Davis E. King (http://arxiv.org/abs/1502.00046).
               
                This means you use this loss if you want to detect the locations of objects
                in images.

                It should also be noted that this loss layer requires an input layer that
                defines the following functions:
                    - image_contained_point()
                    - tensor_space_to_image_space()
                    - image_space_to_tensor_space()
                A reference implementation of them and their definitions can be found in
                the input_rgb_image_pyramid object, which is the recommended input layer to
                be used with loss_mmod_.
        !*/

    public:

        typedef std::vector<mmod_rect> training_label_type;
        typedef std::vector<mmod_rect> output_label_type;

        loss_mmod_(
        );
        /*!
            ensures
                - #get_options() == mmod_options()
        !*/

        loss_mmod_(
            mmod_options options_
        );
        /*!
            ensures
                - #get_options() == options_
        !*/

        const mmod_options& get_options (
        ) const;
        /*!
            ensures
                - returns the options object that defines the general behavior of this loss layer.
        !*/

        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter,
            double adjust_threshold = 0
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
            it has the additional calling requirements that: 
                - sub.get_output().k() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
            Also, the output labels are std::vectors of mmod_rects where, for each mmod_rect R,
            we have the following interpretations:
                - R.rect == the location of an object in the image.
                - R.detection_confidence == the score for the object.  The bigger the score the
                  more confident the detector is that an object is really there.  Only
                  objects with a detection_confidence > adjust_threshold are output.  So if
                  you want to output more objects (that are also of less confidence) you
                  can call to_label() with a smaller value of adjust_threshold.
                - R.ignore == false (this value is unused by to_label()).
        !*/

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth, 
            SUBNET& sub
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::compute_loss_value_and_gradient() 
            except it has the additional calling requirements that: 
                - sub.get_output().k() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
            Also, the loss value returned is roughly equal to the average number of
            mistakes made per image.  This is the sum of false alarms and missed
            detections, weighted by the loss weights for these types of mistakes specified
            in the mmod_options.
        !*/
    };

    template <typename SUBNET>
    using loss_mmod = add_loss_layer<loss_mmod_, SUBNET>;
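
    // Example usage (a minimal sketch, not a complete detection network; a real
    // network would have more layers between the loss and the input.  The con layer is
    // from layers_abstract.h and images/boxes are hypothetical training variables):
    //
    //    using net_type = loss_mmod<con<1,6,6,1,1,input_rgb_image_pyramid<pyramid_down<6>>>>;
    //
    //    mmod_options options(boxes, 6400);
    //    net_type net(options);                        // construct the network with these options
    //    dnn_trainer<net_type> trainer(net);
    //    trainer.train(images, boxes);
    //    std::vector<mmod_rect> dets = net(images[0]); // run the detector on an image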

// ----------------------------------------------------------------------------------------

    class loss_metric_ 
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object implements the loss layer interface defined above by
                EXAMPLE_LOSS_LAYER_.  In particular, it allows you to learn to map objects
                into a vector space where objects sharing the same class label are close to
                each other, while objects with different labels are far apart.   

                To be specific, it optimizes the following loss function which considers
                all pairs of objects in a mini-batch and computes a different loss depending 
                on their respective class labels.  So if objects A1 and A2 in a mini-batch
                share the same class label then their contribution to the loss is:
                    max(0, length(A1-A2)-get_distance_threshold() + get_margin())

                While if A1 and B1 have different class labels then their contribution to
                the loss function is:
                    max(0, get_distance_threshold()-length(A1-B1) + get_margin())

                Therefore, this loss layer optimizes a version of the hinge loss.
                Moreover, the loss is trying to make sure that all objects with the same
                label are within get_distance_threshold() distance of each other.
                Conversely, if two objects have different labels then they should be more
                than get_distance_threshold() distance from each other in the learned
                embedding.  So this loss function gives you a natural decision boundary for
                deciding if two objects are from the same class.
        !*/
    public:

        typedef unsigned long training_label_type;
        typedef matrix<float,0,1> output_label_type;

        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
            it has the additional calling requirements that: 
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
            This loss expects the network to produce a single vector (per sample) as
            output.  This vector is the learned embedding.  Therefore, to_label() just
            copies these output vectors from the network into the output label_iterators
            given to this function, one for each sample in the input_tensor.
        !*/

        float get_margin() const { return 0.1; }
        /*!
            ensures
                - returns the margin value used by the loss function.  See the discussion
                  in WHAT THIS OBJECT REPRESENTS for details.
        !*/

        float get_distance_threshold() const { return 0.75; }
        /*!
            ensures
                - returns the distance threshold value used by the loss function.  See the discussion
                  in WHAT THIS OBJECT REPRESENTS for details.
        !*/

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth, 
            SUBNET& sub
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::compute_loss_value_and_gradient() 
            except it has the additional calling requirements that: 
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
        !*/

    };

    template <typename SUBNET>
    using loss_metric = add_loss_layer<loss_metric_, SUBNET>;
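
    // Example usage (a sketch; after training, the learned embedding gives a natural
    // same/different test via the distance threshold.  net_type, img1, and img2 are
    // hypothetical; the network's final layer must output a single vector per sample):
    //
    //    net_type net = ...;                 // a trained metric learning network
    //    matrix<float,0,1> v1 = net(img1);
    //    matrix<float,0,1> v2 = net(img2);
    //    if (length(v1-v2) < net.loss_details().get_distance_threshold())
    //        cout << "same class" << endl;
    //    else
    //        cout << "different class" << endl;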

// ----------------------------------------------------------------------------------------

    class loss_mean_squared_
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object implements the loss layer interface defined above by
                EXAMPLE_LOSS_LAYER_.  In particular, it implements the mean squared loss, which is
                appropriate for regression problems.
        !*/
    public:

        typedef float training_label_type;
        typedef float output_label_type;

        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
            it has the additional calling requirements that:
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().k() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
            and the output label is the predicted continuous variable.
        !*/

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth,
            SUBNET& sub
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::compute_loss_value_and_gradient()
            except it has the additional calling requirements that:
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().k() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
        !*/

    };

    template <typename SUBNET>
    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;
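
    // Example usage (a minimal sketch, assuming the layer aliases from
    // layers_abstract.h and that samples/targets come from your training data):
    //
    //    using net_type = loss_mean_squared<fc<1,relu<fc<16,input<matrix<float,0,1>>>>>>;
    //
    //    std::vector<matrix<float,0,1>> samples = ...;
    //    std::vector<float> targets = ...;         // the real valued outputs to regress
    //    net_type net;
    //    dnn_trainer<net_type> trainer(net);
    //    trainer.train(samples, targets);
    //    float prediction = net(samples[0]);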

// ----------------------------------------------------------------------------------------

    class loss_mean_squared_multioutput_
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object implements the loss layer interface defined above by
                EXAMPLE_LOSS_LAYER_.  In particular, it implements the mean squared loss, which is
                appropriate for regression problems with multiple outputs per sample.
        !*/
    public:

        typedef matrix<float> training_label_type;
        typedef matrix<float> output_label_type;

        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
            it has the additional calling requirements that:
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
            and the output label is the predicted continuous variable.
        !*/

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth,
            SUBNET& sub
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::compute_loss_value_and_gradient()
            except it has the additional calling requirements that:
                - sub.get_output().nr() == 1
                - sub.get_output().nc() == 1
                - sub.get_output().num_samples() == input_tensor.num_samples()
                - sub.sample_expansion_factor() == 1
                - (*(truth + idx)).nc() == 1 for all idx such that 0 <= idx < sub.get_output().num_samples()
                - (*(truth + idx)).nr() == sub.get_output().k() for all idx such that 0 <= idx < sub.get_output().num_samples()
        !*/

    };

    template <typename SUBNET>
    using loss_mean_squared_multioutput = add_loss_layer<loss_mean_squared_multioutput_, SUBNET>;
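
    // Note that the training labels are column vectors.  For a network whose final fc
    // layer has K outputs, each label must be a K-by-1 matrix<float> (a sketch; K and
    // some_sample are hypothetical):
    //
    //    const long K = 3;
    //    using net_type = loss_mean_squared_multioutput<fc<K,relu<fc<16,input<matrix<float,0,1>>>>>>;
    //
    //    matrix<float> target(K,1);                 // one K dimensional target per sample
    //    target = 1.0f, 2.0f, 3.0f;                 // filled with dlib's comma initializer
    //    net_type net;
    //    matrix<float> prediction = net(some_sample);  // also a K-by-1 matrix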

// ----------------------------------------------------------------------------------------

}

#endif // DLIB_DNn_LOSS_ABSTRACT_H_