// Copyright (C) 2015  Davis E. King (davis@dlib.net)
// License: Boost Software License   See LICENSE.txt for the full license.
#undef DLIB_DNn_INPUT_ABSTRACT_H_
#ifdef DLIB_DNn_INPUT_ABSTRACT_H_

#include "../matrix.h"
#include "../pixel.h"


namespace dlib
{

// ----------------------------------------------------------------------------------------

    class EXAMPLE_INPUT_LAYER
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                Each deep neural network model in dlib begins with an input layer. The job
                of the input layer is to convert an input_type into a tensor.  Nothing more
                and nothing less.  
                
                Note that there is no dlib::EXAMPLE_INPUT_LAYER type.  It is shown here
                purely to document the interface that an input layer object must implement.
                If you are using some kind of image or matrix object as your input_type
                then you can use the provided dlib::input layer defined below.  Otherwise,
                you need to define your own custom input layer.
        !*/
    public:

        EXAMPLE_INPUT_LAYER(
        );
        /*!
            ensures
                - Default constructs this object.  This function is not required to do
                  anything in particular but it must exist, that is, it is required that
                  layer objects be default constructible.
        !*/

        EXAMPLE_INPUT_LAYER(
            const some_other_input_layer_type& item
        );
        /*!
            ensures
                - Constructs this object from item.  This form of constructor is optional
                  but it allows you to provide a conversion from one input layer type to
                  another.  For example, the following code is valid only if my_input_layer2 can
                  be constructed from my_input_layer1:
                    relu<fc<relu<fc<my_input_layer1>>>> my_dnn1;
                    relu<fc<relu<fc<my_input_layer2>>>> my_dnn2(my_dnn1);
                  This kind of pattern is useful if you want to use one type of input layer
                  during training but a different type of layer during testing since it
                  allows you to easily convert between related deep neural network types.  
        !*/

        // sample_expansion_factor must be > 0
        const static unsigned int sample_expansion_factor;
        typedef whatever_type_to_tensor_expects input_type;

        template <typename input_iterator>
        void to_tensor (
            input_iterator ibegin,
            input_iterator iend,
            resizable_tensor& data
        ) const;
        /*!
            requires
                - [ibegin, iend) is an iterator range over input_type objects.
                - std::distance(ibegin,iend) > 0
            ensures
                - Converts the iterator range into a tensor and stores it into #data.
                - #data.num_samples() == distance(ibegin,iend)*sample_expansion_factor. 
                  Normally you would have #data.num_samples() == distance(ibegin,iend) but
                  you can also expand the output by some integer factor so long as the loss
                  you use can deal with it correctly.
                - The data in the ith sample of #data corresponds to the input_type object
                  *(ibegin+i/sample_expansion_factor).
        !*/
    };
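
    // A minimal sketch, not part of dlib, of what a custom input layer satisfying the
    // interface documented above could look like.  The class name my_input_layer is
    // hypothetical; it assumes the inputs are matrix<float> images that all share the
    // same dimensions and it uses a sample_expansion_factor of 1.
    class my_input_layer
    {
    public:
        const static unsigned int sample_expansion_factor = 1;
        typedef matrix<float> input_type;

        template <typename input_iterator>
        void to_tensor (
            input_iterator ibegin,
            input_iterator iend,
            resizable_tensor& data
        ) const
        {
            // The range is required to be non-empty and all inputs are assumed to have
            // the same dimensions, so size the tensor from the first one: one tensor
            // sample per input object and a single channel since each input is one
            // plane of floats.
            const long nr = ibegin->nr();
            const long nc = ibegin->nc();
            data.set_size(std::distance(ibegin,iend), 1, nr, nc);

            // Copy each input image into its sample slot of the tensor.
            float* ptr = data.host();
            for (auto i = ibegin; i != iend; ++i)
            {
                for (long r = 0; r < nr; ++r)
                    for (long c = 0; c < nc; ++c)
                        *ptr++ = (*i)(r,c);
            }
        }
    };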

// ----------------------------------------------------------------------------------------

    template <
        typename T
        >
    class input 
    {
        /*!
            REQUIREMENTS ON T
                T is a matrix or array2d object and it must contain some kind of pixel
                type.  I.e. pixel_traits<T::type> must be defined. 

            WHAT THIS OBJECT REPRESENTS
                This is a basic input layer that simply copies images into a tensor.  
        !*/

    public:
        const static unsigned int sample_expansion_factor = 1;
        typedef T input_type;

        template <typename input_iterator>
        void to_tensor (
            input_iterator ibegin,
            input_iterator iend,
            resizable_tensor& data
        ) const;
        /*!
            requires
                - [ibegin, iend) is an iterator range over input_type objects.
                - std::distance(ibegin,iend) > 0
                - The input range should contain image objects that all have the same
                  dimensions.
            ensures
                - Converts the iterator range into a tensor and stores it into #data.  In
                  particular, if the input images have R rows, C columns, and K channels
                  (where K is given by pixel_traits<T::type>::num) then we will have:
                    - #data.num_samples() == std::distance(ibegin,iend)
                    - #data.nr() == R
                    - #data.nc() == C
                    - #data.k() == K
                  For example, a matrix<float,3,3> would turn into a tensor with 3 rows, 3
                  columns, and k()==1.  Or a matrix<rgb_pixel,4,5> would turn into a tensor
                  with 4 rows, 5 columns, and k()==3 (since rgb_pixels have 3 channels).
        !*/
    };
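
    // A brief usage sketch, hypothetical and not part of dlib's API, illustrating the
    // contract documented above.  It assumes <vector> is available and that the images
    // all share the same dimensions; two 4x5 RGB images yield a tensor with
    // num_samples()==2, k()==3, nr()==4, and nc()==5.
    /*
        std::vector<matrix<rgb_pixel>> images(2);
        images[0].set_size(4,5);    // 4 rows, 5 columns
        images[1].set_size(4,5);

        input<matrix<rgb_pixel>> inp;
        resizable_tensor data;
        inp.to_tensor(images.begin(), images.end(), data);
        // Now data.num_samples() == 2, data.k() == 3, data.nr() == 4, and data.nc() == 5.
    */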

// ----------------------------------------------------------------------------------------

}

#endif // DLIB_DNn_INPUT_ABSTRACT_H_