
#include <dlib/xml_parser.h>
#include <dlib/matrix.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <stack>
#include <set>
#include <map>
#include <dlib/string.h>

using namespace std;
using namespace dlib;


// ----------------------------------------------------------------------------------------

// Only these computational layers have parameters
const std::set<string> comp_tags_with_params = {"fc", "fc_no_bias", "con", "affine_con", "affine_fc", "affine", "prelu"};
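// Layers whose detail tag isn't listed here (e.g. relu, max_pool, add_prev)
// carry no learned parameters, so no parameter text is read from their tags.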

struct layer
{
    string type; // comp, loss, or input
    int idx;

    string detail_name; // The name of the tag inside the layer tag. e.g. fc, con, max_pool, input_rgb_image.
    std::map<string,double> attributes;
    matrix<double> params;
    long tag_id = -1;   // If this isn't -1 then it means this layer was tagged, e.g. wrapped with tag2<> giving tag_id==2
    long skip_id = -1;  // If this isn't -1 then it means this layer draws its inputs from
                        // the most recent layer with tag_id==skip_id rather than its immediate predecessor. 

    double attribute (const string& key) const
    {
        auto i = attributes.find(key);
        if (i != attributes.end())
            return i->second;
        else
            throw dlib::error("Layer doesn't have the requested attribute '" + key + "'.");
    }

    string caffe_layer_name() const 
    { 
        if (type == "input")
            return "data";
        else
            return detail_name+to_string(idx);
    }
};

// ----------------------------------------------------------------------------------------
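
// Parse the network XML produced by dlib's net_to_xml() and return the layers
// ordered from the output (loss) end of the network down to the input layer,
// which is always last.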

std::vector<layer> parse_dlib_xml(
    const string& xml_filename
);

// ----------------------------------------------------------------------------------------

template <typename iterator>
string find_layer_caffe_name (
    iterator i,
    long tag_id
)
/*!
    requires
        - i is an iterator pointing to a layer in the list of layers produced by parse_dlib_xml().
        - i is not an input layer.
    ensures
        - if (tag_id == -1) then
            - returns the caffe string name for the previous layer to layer i.
        - else
            - returns the caffe string name for the previous layer to layer i with the given tag_id.
!*/
{
    if (tag_id == -1)
    {
        return (i-1)->caffe_layer_name();
    }
    else
    {
        while(true)
        {
            i--;
            if (i->tag_id == tag_id)
                return i->caffe_layer_name();

            // If we walk all the way back to the input layer without finding
            // the tag then the network definition is broken.
            if (i->type == "input")
                throw dlib::error("Network definition is bad, a layer wanted to skip back to a nonexistent layer.");
        }
    }
}
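
// A layer's input is normally the layer immediately preceding it in network
// order, unless the layer has a skip_id, in which case its input is the most
// recent layer tagged with that id.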

template <typename iterator>
string find_input_layer_caffe_name (iterator i) { return find_layer_caffe_name(i, i->skip_id); }

// ----------------------------------------------------------------------------------------

template <typename EXP>
void print_as_np_array(std::ostream& out, const matrix_exp<EXP>& m)
{
    out << "np.array([";
    for (auto x : m)
        out << x << ",";
    out << "], dtype='float32')";
}
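
// Note: the values stream out in dlib's row-major order, and the generated
// python reshapes them with numpy, whose default ordering is also row-major,
// so the element ordering is preserved.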

// ----------------------------------------------------------------------------------------

void convert_dlib_xml_to_caffe_python_code(
    const string& xml_filename
)
{
    const auto layers = parse_dlib_xml(xml_filename);

    cout << "import caffe " << endl;
    cout << "from caffe import layers as L, params as P" << endl;
    cout << "import numpy as np" << endl;

    // dlib nets don't commit to a batch size, so just use 1 as the default
    cout << "batch_size = 1;" << endl;
    if (layers.back().detail_name == "input_rgb_image")
    {
        cout << "input_nr = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
        cout << "input_nc = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
        cout << "input_k = 3;" << endl;
    }
    else if (layers.back().detail_name == "input_rgb_image_sized")
    {
        cout << "input_nr = " << layers.back().attribute("nr") << ";" << endl;
        cout << "input_nc = " << layers.back().attribute("nc") << ";" << endl;
        cout << "input_k = 3;" << endl;
    }
    else if (layers.back().detail_name == "input")
    {
        cout << "input_nr = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
        cout << "input_nc = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
        cout << "input_k = 1;" << endl;
    }
    else
    {
        throw dlib::error("No known transformation from dlib's " + layers.back().detail_name + " layer to caffe.");
    }

    cout << "def make_netspec():" << endl;
    cout << "    # For reference, the only \"documentation\" about caffe layer parameters seems to be this page:\n";
    cout << "    # https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto\n" << endl;
    cout << "    n = caffe.NetSpec(); " << endl;
    cout << "    n.data,n.label = L.MemoryData(batch_size=batch_size, channels=input_k, height=input_nr, width=input_nc, ntop=2)" << endl;
    // iterate the layers starting with the input layer
    for (auto i = layers.rbegin(); i != layers.rend(); ++i)
    {
        // skip input and loss layers
        if (i->type == "loss" || i->type == "input")
            continue;


        if (i->detail_name == "con")
        {
            cout << "    n." << i->caffe_layer_name() << " = L.Convolution(n." << find_input_layer_caffe_name(i);
            cout << ", num_output=" << i->attribute("num_filters");
            cout << ", kernel_w=" << i->attribute("nc");
            cout << ", kernel_h=" << i->attribute("nr");
            cout << ", stride_w=" << i->attribute("stride_x");
            cout << ", stride_h=" << i->attribute("stride_y");
            cout << ", pad_w=" << i->attribute("padding_x");
            cout << ", pad_h=" << i->attribute("padding_y");
            cout << ");\n";
        }
        else if (i->detail_name == "relu")
        {
            cout << "    n." << i->caffe_layer_name() << " = L.ReLU(n." << find_input_layer_caffe_name(i);
            cout << ");\n";
        }
        else if (i->detail_name == "max_pool")
        {
            cout << "    n." << i->caffe_layer_name() << " = L.Pooling(n." << find_input_layer_caffe_name(i);
            cout << ", pool=P.Pooling.MAX"; 
            if (i->attribute("nc")==0)
            {
                cout << ", global_pooling=True";
            }
            else
            {
                cout << ", kernel_w=" << i->attribute("nc");
                cout << ", kernel_h=" << i->attribute("nr");
            }
            if (i->attribute("padding_x") != 0 || i->attribute("padding_y") != 0)
            {
                throw dlib::error("dlib and caffe implement pooling with non-zero padding differently, so you can't convert a "
                    "network with such pooling layers.");
            }

            cout << ", stride_w=" << i->attribute("stride_x");
            cout << ", stride_h=" << i->attribute("stride_y");
            cout << ", pad_w=" << i->attribute("padding_x");
            cout << ", pad_h=" << i->attribute("padding_y");
            cout << ");\n";
        }
        else if (i->detail_name == "avg_pool")
        {
            cout << "    n." << i->caffe_layer_name() << " = L.Pooling(n." << find_input_layer_caffe_name(i);
            cout << ", pool=P.Pooling.AVE"; 
            if (i->attribute("nc")==0)
            {
                cout << ", global_pooling=True";
            }
            else
            {
                cout << ", kernel_w=" << i->attribute("nc");
                cout << ", kernel_h=" << i->attribute("nr");
            }
            if (i->attribute("padding_x") != 0 || i->attribute("padding_y") != 0)
            {
                throw dlib::error("dlib and caffe implement pooling with non-zero padding differently, so you can't convert a "
                    "network with such pooling layers.");
            }

            cout << ", stride_w=" << i->attribute("stride_x");
            cout << ", stride_h=" << i->attribute("stride_y");
            cout << ", pad_w=" << i->attribute("padding_x");
            cout << ", pad_h=" << i->attribute("padding_y");
            cout << ");\n";
        }
        else if (i->detail_name == "fc")
        {
            cout << "    n." << i->caffe_layer_name() << " = L.InnerProduct(n." << find_input_layer_caffe_name(i);
            cout << ", num_output=" << i->attribute("num_outputs");
            cout << ", bias_term=True";
            cout << ");\n";
        }
        else if (i->detail_name == "fc_no_bias")
        {
            cout << "    n." << i->caffe_layer_name() << " = L.InnerProduct(n." << find_input_layer_caffe_name(i);
            cout << ", num_output=" << i->attribute("num_outputs");
            cout << ", bias_term=False";
            cout << ");\n";
        }
        else if (i->detail_name == "bn_con" || i->detail_name == "bn_fc")
        {
            throw dlib::error("Conversion from dlib's batch norm layers to caffe's isn't supported.  Instead, "
                "you should put your network into 'test mode' by switching batch norm layers to affine layers.");
        }
        else if (i->detail_name == "affine_con")
        {
            cout << "    n." << i->caffe_layer_name() << " = L.Scale(n." << find_input_layer_caffe_name(i);
            cout << ", axis=1";
            cout << ", bias_term=True";
            cout << ");\n";
        }
        else if (i->detail_name == "affine_fc")
        {
            cout << "    n." << i->caffe_layer_name() << " = L.Scale(n." << find_input_layer_caffe_name(i);
            cout << ", axis=3";
            cout << ", bias_term=True";
            cout << ");\n";
        }
        else if (i->detail_name == "add_prev")
        {
            cout << "    n." << i->caffe_layer_name() << " = L.Eltwise(n." << find_input_layer_caffe_name(i);
            cout << ", n." << find_layer_caffe_name(i, i->attribute("tag"));
            cout << ", operation=P.Eltwise.SUM";
            cout << ");\n";
        }
        else
        {
            throw dlib::error("No known transformation from dlib's " + i->detail_name + " layer to caffe.");
        }
    }
    cout << "    return n.to_proto();\n\n" << endl;


    // -------------------------
    // -------------------------


    cout << "def save_as_caffe_model(def_file, weights_file):\n";
    cout << "    with open(def_file, 'w') as f: f.write(str(make_netspec()));\n";
    cout << "    net = caffe.Net(def_file, caffe.TEST);\n";
    cout << "    set_network_weights(net);\n";
    cout << "    net.save(weights_file);\n\n";


    // -------------------------
    // -------------------------


    cout << "def set_network_weights(net):\n";
    cout << "    # populate network parameters\n";
    // iterate the layers starting with the input layer
    for (auto i = layers.rbegin(); i != layers.rend(); ++i)
    {
        // skip input and loss layers
        if (i->type == "loss" || i->type == "input")
            continue;


        if (i->detail_name == "con")
        {
            const long num_filters = i->attribute("num_filters");
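            // dlib serializes a con layer's parameters as one flat vector:
            // all the filter weights first, then num_filters bias terms.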
            matrix<double> weights = trans(rowm(i->params,range(0,i->params.size()-num_filters-1)));
            matrix<double> biases  = trans(rowm(i->params,range(i->params.size()-num_filters, i->params.size()-1)));

            // main filter weights
            cout << "    p = "; print_as_np_array(cout,weights); cout << ";\n";
            cout << "    p.shape = net.params['"<<i->caffe_layer_name()<<"'][0].data.shape;\n";
            cout << "    net.params['"<<i->caffe_layer_name()<<"'][0].data[:] = p;\n";

            // biases
            cout << "    p = "; print_as_np_array(cout,biases); cout << ";\n";
            cout << "    p.shape = net.params['"<<i->caffe_layer_name()<<"'][1].data.shape;\n";
            cout << "    net.params['"<<i->caffe_layer_name()<<"'][1].data[:] = p;\n";
        }
        else if (i->detail_name == "fc")
        {
            matrix<double> weights = trans(rowm(i->params, range(0,i->params.nr()-2))); 
            matrix<double> biases  = rowm(i->params, i->params.nr()-1); 

            // main filter weights
            cout << "    p = "; print_as_np_array(cout,weights); cout << ";\n";
            cout << "    p.shape = net.params['"<<i->caffe_layer_name()<<"'][0].data.shape;\n";
            cout << "    net.params['"<<i->caffe_layer_name()<<"'][0].data[:] = p;\n";

            // biases
            cout << "    p = "; print_as_np_array(cout,biases); cout << ";\n";
            cout << "    p.shape = net.params['"<<i->caffe_layer_name()<<"'][1].data.shape;\n";
            cout << "    net.params['"<<i->caffe_layer_name()<<"'][1].data[:] = p;\n";
        }
        else if (i->detail_name == "fc_no_bias")
        {
            matrix<double> weights = trans(i->params); 

            // main filter weights
            cout << "    p = "; print_as_np_array(cout,weights); cout << ";\n";
            cout << "    p.shape = net.params['"<<i->caffe_layer_name()<<"'][0].data.shape;\n";
            cout << "    net.params['"<<i->caffe_layer_name()<<"'][0].data[:] = p;\n";
        }
        else if (i->detail_name == "affine_con" || i->detail_name == "affine_fc")
        {
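            // An affine layer's parameter vector holds the scale (gamma)
            // terms in its first half and the bias (beta) terms in the
            // second half.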
            const long dims = i->params.size()/2;
            matrix<double> gamma = trans(rowm(i->params,range(0,dims-1)));
            matrix<double> beta  = trans(rowm(i->params,range(dims, 2*dims-1)));

            // set gamma weights
            cout << "    p = "; print_as_np_array(cout,gamma); cout << ";\n";
            cout << "    p.shape = net.params['"<<i->caffe_layer_name()<<"'][0].data.shape;\n";
            cout << "    net.params['"<<i->caffe_layer_name()<<"'][0].data[:] = p;\n";

            // set beta weights 
            cout << "    p = "; print_as_np_array(cout,beta); cout << ";\n";
            cout << "    p.shape = net.params['"<<i->caffe_layer_name()<<"'][1].data.shape;\n";
            cout << "    net.params['"<<i->caffe_layer_name()<<"'][1].data[:] = p;\n";
        }
    }

}

// ----------------------------------------------------------------------------------------
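
// Usage sketch (the binary name is illustrative; build this file as you would
// any other dlib tool):
//   ./convert_dlib_nets_to_caffe mynet.xml > mynet_caffe.py
// The emitted python defines make_netspec(), set_network_weights(), and
// save_as_caffe_model() for use with pycaffe.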

int main(int argc, char** argv) try
{
    cout.precision(9);
    // TODO, write out to multiple files or just process one file at a time.  
    for (int i = 1; i < argc; ++i)
        convert_dlib_xml_to_caffe_python_code(argv[i]);

    return 0;
}
catch(std::exception& e)
{
    cout << "\n\n*************** ERROR CONVERTING TO CAFFE ***************\n" << e.what() << endl;
    return 1;
}

// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------
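
// The handler below consumes the XML written by dlib's net_to_xml().  The
// expected shape is roughly this (a sketch; attribute values are
// illustrative):
//
//   <net>
//     <layer idx='2' type='loss'> <loss_multiclass_log/> </layer>
//     <layer idx='1' type='comp'> <fc num_outputs='10'> ...parameters... </fc> </layer>
//     <layer idx='0' type='input'> <input_rgb_image/> </layer>
//   </net>
//
// Tag and skip layers appear as <layer type='tag' id='K'/> and
// <layer type='skip' id='K'/>; they are folded into the tag_id/skip_id fields
// of neighboring layers rather than stored as layers themselves.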

class doc_handler : public document_handler
{
public:
    std::vector<layer> layers;
    bool seen_first_tag = false;

    layer next_layer;
    std::stack<string> current_tag;
    long tag_id = -1;


    virtual void start_document (
    ) 
    { 
        layers.clear(); 
        seen_first_tag = false;
        tag_id = -1;
    }

    virtual void end_document (
    ) { }

    virtual void start_element ( 
        const unsigned long line_number,
        const std::string& name,
        const dlib::attribute_list& atts
    )
    {
        if (!seen_first_tag)
        {
            if (name != "net")
                throw dlib::error("The top level XML tag must be a 'net' tag.");
            seen_first_tag = true;
        }

        if (name == "layer")
        {
            next_layer = layer();
            if (atts["type"] == "skip")
            {
                // Don't make a new layer, just apply the skip id to the previous layer.
                if (layers.size() == 0)
                    throw dlib::error("A skip layer was found as the first layer, but the first layer should be an input layer.");
                // sa is dlib's string_assign helper; it converts the attribute
                // string into skip_id's type.
                layers.back().skip_id = sa = atts["id"];
                
                // We intentionally leave next_layer empty so the end_element() callback
                // doesn't add it as another layer when called.
            }
            else if (atts["type"] == "tag")
            {
                // Don't make a new layer, just remember the tag id so we can apply it on
                // the next layer.
                tag_id = sa = atts["id"];
                
                // We intentionally leave next_layer empty so the end_element() callback
                // doesn't add it as another layer when called.
            }
            else
            {
                next_layer.idx = sa = atts["idx"];
                next_layer.type = atts["type"];
                if (tag_id != -1)
                {
                    next_layer.tag_id = tag_id;
                    tag_id = -1;
                }
            }
        }
        else if (current_tag.size() != 0 && current_tag.top() == "layer")
        {
            next_layer.detail_name = name;
            // copy all the XML tag's attributes into the layer struct
            atts.reset();
            while (atts.move_next())
                next_layer.attributes[atts.element().key()] = sa = atts.element().value();
        }

        current_tag.push(name);
    }

    virtual void end_element ( 
        const unsigned long line_number,
        const std::string& name
    )
    {
        current_tag.pop();
        if (name == "layer" && next_layer.type.size() != 0)
            layers.push_back(next_layer);
    }

    virtual void characters ( 
        const std::string& data
    )
    {
        if (current_tag.size() == 0)
            return;

        if (comp_tags_with_params.count(current_tag.top()) != 0)
        {
            istringstream sin(data);
            sin >> next_layer.params;
        }

    }

    virtual void processing_instruction (
        const unsigned long line_number,
        const std::string& target,
        const std::string& data
    )
    {
    }
};

// ----------------------------------------------------------------------------------------

std::vector<layer> parse_dlib_xml(
    const string& xml_filename
)
{
    doc_handler dh;
    parse_xml(xml_filename, dh);
    if (dh.layers.size() == 0)
        throw dlib::error("No layers found in XML file!");

    if (dh.layers.back().type != "input")
        throw dlib::error("The network in the XML file is missing an input layer!");

    return dh.layers;
}

// ----------------------------------------------------------------------------------------