"vscode:/vscode.git/clone" did not exist on "423623cba424905611c07c7143b73c494a258069"
onnx.cpp 32.3 KB
Newer Older
Paul's avatar
Paul committed
1
2
3
4
5
6
7
8
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <onnx.pb.h>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <functional>
#include <array>
#include <utility>
#include <vector>

#include <migraphx/fallthrough.hpp>
#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/config.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

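// Fallback op used when an ONNX node's op_type has no registered parser; it
// forwards the first input shape so the rest of the graph can still be parsed.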
struct unknown
{
    std::string op;
    std::string name() const { return "unknown:" + op; }
    shape compute_shape(std::vector<shape> input) const
    {
        if(input.empty())
            return {};
        else
            return input.front();
    }
    friend std::ostream& operator<<(std::ostream& os, const unknown& x)
    {
        os << x.name();
        return os;
    }
};

struct onnx_parser
{
    using attribute_map = std::unordered_map<std::string, onnx::AttributeProto>;
    using node_map      = std::unordered_map<std::string, onnx::NodeProto>;
    using op_func = std::function<instruction_ref(attribute_map, std::vector<instruction_ref>)>;

    node_map nodes;
    std::unordered_map<std::string, instruction_ref> instructions;
    program prog    = program();
    bool is_pytorch = false;

    std::unordered_map<std::string, op_func> ops;

    onnx_parser()
    {
        add_generic_op("MatMul", op::dot{});
        add_generic_op("Relu", op::relu{});
        add_generic_op("Sigmoid", op::sigmoid{});
        add_generic_op("Tanh", op::tanh{});
        add_generic_op("Abs", op::abs{});
        // disable dropout for inference
        add_generic_op("Dropout", op::identity{});
        add_generic_op("Identity", op::identity{});

        add_binary_op("Add", op::add{});
        add_binary_op("Div", op::div{});
        add_binary_op("Mul", op::mul{});
        add_binary_op("Sub", op::sub{});

        add_mem_op("Sum", &onnx_parser::parse_sum);
        add_mem_op("Max", &onnx_parser::parse_max);
        add_mem_op("Min", &onnx_parser::parse_min);

        add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
        add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
        add_mem_op("Elu", &onnx_parser::parse_elu);
        add_mem_op("Constant", &onnx_parser::parse_constant);
        add_mem_op("Conv", &onnx_parser::parse_conv);
        add_mem_op("MaxPool", &onnx_parser::parse_pooling);
        add_mem_op("AveragePool", &onnx_parser::parse_pooling);
        add_mem_op("GlobalMaxPool", &onnx_parser::parse_pooling);
        add_mem_op("GlobalAveragePool", &onnx_parser::parse_pooling);
        add_mem_op("Reshape", &onnx_parser::parse_reshape);
        add_mem_op("Flatten", &onnx_parser::parse_flatten);
        add_mem_op("Gemm", &onnx_parser::parse_gemm);
        add_mem_op("BatchNormalization", &onnx_parser::parse_batchnorm);
        add_mem_op("Softmax", &onnx_parser::parse_softmax);
        add_mem_op("Squeeze", &onnx_parser::parse_squeeze);
        add_mem_op("Unsqueeze", &onnx_parser::parse_unsqueeze);
        add_mem_op("Slice", &onnx_parser::parse_slice);
        add_mem_op("Concat", &onnx_parser::parse_concat);
        add_mem_op("Transpose", &onnx_parser::parse_transpose);
    }

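    // add_op registers a parsing callback directly, while add_mem_op wraps a member
    // function so it also receives the ONNX op name as its first argument.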
    template <class F>
    void add_op(std::string name, F f)
    {
        ops.emplace(name, f);
    }

    template <class F>
    void add_mem_op(std::string name, F f)
    {
        ops.emplace(name, [=](auto&&... xs) {
            return std::mem_fn(f)(*this, name, std::forward<decltype(xs)>(xs)...);
        });
    }

    template <class T>
    void add_binary_op(std::string name, T x)
    {
        ops.emplace(name, [this, x](attribute_map attributes, std::vector<instruction_ref> args) {
            if(args.size() != 2)
                MIGRAPHX_THROW("binary operators should have 2 operands");
            if(contains(attributes, "broadcast"))
            {
                uint64_t broadcasted = parse_value(attributes.at("broadcast")).at<uint64_t>();
                if(broadcasted != 0)
                {
                    uint64_t axis = (contains(attributes, "axis"))
                                        ? parse_value(attributes.at("axis")).at<uint64_t>()
                                        : 0;
                    auto l =
                        prog.add_instruction(op::broadcast{axis, args[0]->get_shape()}, args[1]);
                    return prog.add_instruction(x, args[0], l);
                }
                return prog.add_instruction(x, args);
            }
            else
            {
                return add_broadcastable_binary_op(args[0], args[1], x);
            }
        });
    }

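    // Applies numpy-style multidirectional broadcasting: the shorter shape is aligned
    // to the trailing dimensions of the longer one and each output length is the max
    // of the corresponding pair, as in the worked examples below.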
    template <class T>
    instruction_ref add_broadcastable_binary_op(instruction_ref arg0, instruction_ref arg1, T x)
    {
        if(arg0->get_shape() != arg1->get_shape())
        {
            // Example:
            // s0 = (3,2,4,5) and s1 = (2,1,1)
            //
            // In this case we need to broadcast (:,1,1) portion of
            // s1 plus broadcast the 1st dimension of s1
            // giving output_lens = (3,2,4,5)
            //
            // Another example:
            // s0 = (3,2,1,5) and s1 = (2,7,5)
            // In this case we need to broadcast the (:,:,1:,:) axis
            // of s0 plus the 1st dimension of s1 giving
            // output_lens = (3,2,7,5)
            //
            // Get lengths for both arguments
            const std::vector<std::size_t>* s0 = &arg0->get_shape().lens();
            const std::vector<std::size_t>* s1 = &arg1->get_shape().lens();

            // Make sure s0 is the smaller size
            if(s0->size() > s1->size())
                std::swap(s0, s1);

            std::vector<std::size_t> output_lens(s1->size());
            auto offset = s1->size() - s0->size();
            std::transform(s0->begin(),
                           s0->end(),
                           s1->begin() + offset,
                           output_lens.begin() + offset,
                           [](auto a, auto b) { return std::max(a, b); });

            auto l0 = prog.add_instruction(op::multibroadcast{output_lens}, arg0);
            auto l1 = prog.add_instruction(op::multibroadcast{output_lens}, arg1);
            return prog.add_instruction(x, l0, l1);
        }
        else
        {
            return prog.add_instruction(x, {arg0, arg1});
        }
    }

    template <class T>
    void add_generic_op(std::string name, T x)
    {
        ops.emplace(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
            return prog.add_instruction(x, args);
        });
    }

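    // Sum, Max, and Min accept a variable number of inputs; they are folded pairwise
    // into broadcastable binary ops.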
    instruction_ref
    parse_sum(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
        auto curr_sum = args.front();
        if(args.size() > 1)
        {
            for(auto it = std::next(args.begin()); it != args.end(); ++it)
            {
                curr_sum = add_broadcastable_binary_op(curr_sum, *it, op::add{});
            }
        }
        return curr_sum;
    }

    instruction_ref
    parse_max(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
        auto curr_max = args.front();
        if(args.size() > 1)
        {
            for(auto it = std::next(args.begin()); it != args.end(); ++it)
            {
                curr_max = add_broadcastable_binary_op(curr_max, *it, op::max{});
            }
        }
        return curr_max;
    }

    instruction_ref
    parse_min(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
        auto curr_min = args.front();
        if(args.size() > 1)
        {
            for(auto it = std::next(args.begin()); it != args.end(); ++it)
            {
                curr_min = add_broadcastable_binary_op(curr_min, *it, op::min{});
            }
        }
        return curr_min;
    }

    instruction_ref
    parse_softmax(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
        auto dims = args.front()->get_shape().lens();
        auto r =
            prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1]), 1, 1}}, args.front());
        auto s = prog.add_instruction(op::softmax{}, r);
        return prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1])}}, s);
    }

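    // Conv: only symmetric padding is supported; an optional bias (third input) is
    // broadcast along the channel axis and added after the convolution.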
    instruction_ref
    parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::convolution op;
        if(contains(attributes, "pads"))
        {
            if(contains(attributes, "auto_pad"))
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }
            std::vector<std::size_t> padding(4);
            copy(attributes["pads"].ints(), padding.begin());
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                MIGRAPHX_THROW("migraphx does not support asymmetric padding");
            }
            op.padding[0] = padding[0];
            op.padding[1] = padding[1];
        }
        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
        }
        if(contains(attributes, "dilations"))
        {
            copy(attributes["dilations"].ints(), op.dilation.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(contains(attributes, "pads") and to_upper(s) != "NOTSET")
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }

            if(s.find("SAME") != std::string::npos)
            {
                op.padding_mode = op::convolution::same;
            }
        }
        if(args.size() == 3)
        {
            uint64_t axis = 1;
            auto l1       = prog.add_instruction(op, args[0], args[1]);
            auto l2       = prog.add_instruction(op::broadcast{axis, l1->get_shape()}, args[2]);
            return prog.add_instruction(op::add{}, l1, l2);
        }
        return prog.add_instruction(op, args);
    }

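    // MaxPool, AveragePool, and their Global variants all map to op::pooling; the
    // Global forms use the full spatial extent of the input as the kernel size.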
    instruction_ref parse_pooling(const std::string& name,
                                  attribute_map attributes,
                                  std::vector<instruction_ref> args)
    {
        op::pooling op{ends_with(name, "MaxPool") ? "max" : "average"};
        if(starts_with(name, "Global"))
        {
            auto lens  = args.front()->get_shape().lens();
            op.lengths = {lens[2], lens[3]};
        }
        if(contains(attributes, "pads"))
        {
            std::vector<std::size_t> padding(4);
            copy(attributes["pads"].ints(), padding.begin());
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                MIGRAPHX_THROW("migraphx does not support asymmetric padding");
            }
            op.padding[0] = padding[0];
            op.padding[1] = padding[1];
        }
        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
        }
        if(contains(attributes, "kernel_shape"))
        {
            copy(attributes["kernel_shape"].ints(), op.lengths.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(to_upper(s) != "NOTSET")
            {
                MIGRAPHX_THROW("auto_pad is not supported for pooling");
            }
        }

        return prog.add_instruction(op, std::move(args));
    }

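    // Reshape: the target dimensions come either from the "shape" attribute or, when a
    // second input is present, from that input's literal value.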
    instruction_ref
    parse_reshape(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::reshape op;
        if(args.size() == 1)
        {
            literal s = parse_value(attributes.at("shape"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
        }
        if(args.size() == 2)
        {
            literal s = args[1]->get_literal();
            s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
        }
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_flatten(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        uint64_t axis = 0;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }
        return prog.add_instruction(op::flatten{axis}, args[0]);
    }

    instruction_ref
    parse_squeeze(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::squeeze op;
        literal s = parse_value(attributes.at("axes"));
        s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_unsqueeze(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::unsqueeze op;
        literal s = parse_value(attributes.at("axes"));
        s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_concat(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::size_t axis = parse_value(attributes.at("axis")).at<int>();
        op::concat op{axis};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref
    parse_slice(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::slice op;
        if(contains(attributes, "axes"))
        {
            literal s = parse_value(attributes.at("axes"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        }
        {
            literal s = parse_value(attributes.at("ends"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.ends)); });
        }
        {
            literal s = parse_value(attributes.at("starts"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.starts)); });
        }
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref parse_constant(const std::string&,
                                   attribute_map attributes,
                                   const std::vector<instruction_ref>&)
    {
        literal v = parse_value(attributes.at("value"));
        return prog.add_literal(v);
    }

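    // Gemm: transA/transB become explicit transpose instructions feeding op::dot; an
    // optional C input is broadcast along axis 1 and added to the product.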
    instruction_ref
    parse_gemm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 1.0f;
        float beta  = 0.0f;
        bool transa = false;
        bool transb = false;
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        if(contains(attributes, "beta"))
        {
            beta = parse_value(attributes.at("beta")).at<float>();
        }
        if(contains(attributes, "transA"))
        {
            transa = parse_value(attributes.at("transA")).at<bool>();
        }
        if(contains(attributes, "transB"))
        {
            transb = parse_value(attributes.at("transB")).at<bool>();
        }
        std::vector<int64_t> perm = {1, 0};
        auto l1 = (transa) ? prog.add_instruction(op::transpose{perm}, args[0]) : args[0];
        auto l2 = (transb) ? prog.add_instruction(op::transpose{perm}, args[1]) : args[1];
        if(args.size() == 3)
        {
            uint64_t axis = 1;
            auto l3       = prog.add_instruction(op::dot{alpha, beta}, l1, l2);
            auto l4       = prog.add_instruction(op::broadcast{axis, l3->get_shape()}, args[2]);
            return prog.add_instruction(op::add{}, l3, l4);
        }
        return prog.add_instruction(op::dot{alpha, beta}, l1, l2);
    }

    instruction_ref
    parse_batchnorm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float epsilon                                     = 1e-5f;
        float momentum                                    = 0.9f;
        op::batch_norm_inference::bn_infer_mode_t bn_mode = op::batch_norm_inference::spatial;
        bool is_test                                      = false;
        if(contains(attributes, "epsilon"))
        {
            epsilon = parse_value(attributes.at("epsilon")).at<float>();
        }
        if(contains(attributes, "momentum"))
        {
            momentum = parse_value(attributes.at("momentum")).at<float>();
        }
        if(contains(attributes, "is_test"))
        {
            is_test = parse_value(attributes.at("is_test")).at<uint64_t>() > 0;
        }
        if(contains(attributes, "spatial"))
        {
            bn_mode = (parse_value(attributes.at("spatial")).at<uint64_t>() > 0)
                          ? op::batch_norm_inference::spatial
                          : op::batch_norm_inference::per_activation;
        }
        (void)is_test;
        op::batch_norm_inference op{epsilon, momentum, bn_mode};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref parse_leaky_relu(const std::string&,
                                     attribute_map attributes,
                                     std::vector<instruction_ref> args)
    {
        float alpha = 0.01; // default alpha val for leaky relu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::leaky_relu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    instruction_ref
    parse_elu(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 1.0; // default alpha val for elu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::elu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    instruction_ref parse_imagescaler(const std::string&,
                                      attribute_map attributes,
                                      std::vector<instruction_ref> args)
    {
        float scale = 1.0;
        std::vector<float> bias{};
        if(contains(attributes, "scale"))
        {
            scale = parse_value(attributes.at("scale")).at<float>();
        }

        if(contains(attributes, "bias"))
        {
            auto&& bias_floats = attributes["bias"].floats();
            bias               = std::vector<float>(bias_floats.begin(), bias_floats.end());
        }
        auto input_shape = args.front()->get_shape();

        auto scale_val = prog.add_literal(scale);
        auto bias_vals = prog.add_literal(
            migraphx::literal{migraphx::shape{migraphx::shape::float_type, {bias.size()}}, bias});

        auto scale_tensor = prog.add_instruction(migraphx::op::scalar{input_shape}, scale_val);
        auto img_scaled   = prog.add_instruction(migraphx::op::mul{}, args.front(), scale_tensor);
        auto bias_bcast = prog.add_instruction(migraphx::op::broadcast{1, input_shape}, bias_vals);
        return prog.add_instruction(migraphx::op::add{}, img_scaled, bias_bcast);
    }

    instruction_ref
    parse_transpose(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::vector<int64_t> perm{};
        if(contains(attributes, "perm"))
        {
            auto&& perm_vals = attributes["perm"].ints();
            perm             = std::vector<int64_t>(perm_vals.begin(), perm_vals.end());
        }
        return prog.add_instruction(migraphx::op::transpose{perm}, args.front());
    }

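    // Entry point: parse_from deserializes a ModelProto and parse_graph then walks the
    // graph's initializers, inputs, and nodes to build the migraphx program.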
    void parse_from(std::istream& is)
    {
        onnx::ModelProto model;
        if(model.ParseFromIstream(&is))
        {
            if(model.has_graph())
            {
                this->parse_graph(model.graph());
            }
        }
        else
        {
            throw std::runtime_error("Failed reading");
        }
    }

    void parse_graph(const onnx::GraphProto& graph)
    {
        nodes = get_nodes(graph);
        std::unordered_map<std::string, onnx::TensorProto> initializer_data;
        for(auto&& f : graph.initializer())
        {
            initializer_data[f.name()] = f;
        }
        for(auto&& input : graph.input())
        {
            const std::string& name = input.name();
            // Does the input have an initializer?
            if(contains(initializer_data, name))
            {
                auto t             = initializer_data[name];
                instructions[name] = prog.add_literal(parse_tensor(t));
            }
            else
            {
                // TODO: Get shape of input parameter
                shape s            = parse_type(input.type());
                instructions[name] = prog.add_parameter(name, s);
            }
        }
        for(auto&& p : nodes)
        {
            this->parse_node(get_name(p.second));
        }
    }

    void parse_node(const std::string& name)
    {
        if(name.empty())
            MIGRAPHX_THROW("Onnx node must have a name");
        if(instructions.count(name) == 0)
        {
            auto&& node = nodes.at(name);
            std::vector<instruction_ref> args;
            for(auto&& input : node.input())
            {
                if(nodes.count(input) > 0)
                {
                    auto&& iname = get_name(nodes.at(input));
                    assert(name != iname);
                    this->parse_node(iname);
                    args.push_back(instructions.at(iname));
                }
                else
                {
                    args.push_back(instructions.at(input));
                }
            }
            if(ops.count(node.op_type()) == 0)
            {
                instructions[name] = prog.add_instruction(unknown{node.op_type()}, args);
            }
            else
            {
                instructions[name] = ops[node.op_type()](get_attributes(node), args);
            }
        }
    }

    static attribute_map get_attributes(const onnx::NodeProto& node)
    {
        std::unordered_map<std::string, onnx::AttributeProto> result;
        for(auto&& attr : node.attribute())
        {
            result[attr.name()] = attr;
        }
        return result;
    }

    static std::string get_name(const onnx::NodeProto& node)
    {
        if(node.name().empty())
        {
            std::string generated = "migraphx_unnamed_node";
            return std::accumulate(node.output().begin(),
                                   node.output().end(),
                                   generated,
                                   [](auto x, auto y) { return x + "_" + y; });
        }
        return node.name();
    }

    static node_map get_nodes(const onnx::GraphProto& graph)
    {
        std::unordered_map<std::string, onnx::NodeProto> result;
        for(auto&& node : graph.node())
        {
            result[get_name(node)] = node;
            for(auto&& output : node.output())
            {
                result[output] = node;
            }
        }
        return result;
    }

    template <class T>
    static literal from_repeated(shape::type_t t, const T& r)
    {
        std::size_t size = r.size();
        return literal{{t, {size}}, r.begin(), r.end()};
    }

    static literal parse_value(const onnx::AttributeProto& attr)
    {
        switch(attr.type())
        {
        case onnx::AttributeProto::UNDEFINED: return {};
        case onnx::AttributeProto::FLOAT: return literal{attr.f()};
        case onnx::AttributeProto::INT: return literal{attr.i()};
        case onnx::AttributeProto::STRING: return {};
        case onnx::AttributeProto::TENSOR: return parse_tensor(attr.t());
        case onnx::AttributeProto::GRAPH: return {};
        case onnx::AttributeProto::FLOATS: return from_repeated(shape::float_type, attr.floats());
        case onnx::AttributeProto::INTS: return from_repeated(shape::int64_type, attr.ints());
        case onnx::AttributeProto::STRINGS: return {};
        case onnx::AttributeProto::TENSORS: return {};
        case onnx::AttributeProto::GRAPHS: return {};
        }
        MIGRAPHX_THROW("Invalid attribute type");
    }

    static literal parse_tensor(const onnx::TensorProto& t)
    {
        std::vector<std::size_t> dims(t.dims().begin(), t.dims().end());
        // in case of scalar constants in onnx file, use dims=1 to fill initializer data
        if(dims.size() == 0)
        {
            dims = {1};
        }
        if(t.has_raw_data())
        {
            const std::string& s = t.raw_data();
            switch(t.data_type())
            {
            case onnx::TensorProto::UNDEFINED: throw std::runtime_error("");
            case onnx::TensorProto::FLOAT: return literal{{shape::float_type, dims}, s.data()};
            case onnx::TensorProto::UINT8: throw std::runtime_error("");
            case onnx::TensorProto::INT8: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::UINT16: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT16: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT32: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT64: return literal{{shape::int64_type, dims}, s.data()};
            case onnx::TensorProto::STRING: throw std::runtime_error("");
            case onnx::TensorProto::BOOL: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::FLOAT16: return literal{{shape::half_type, dims}, s.data()};
            case onnx::TensorProto::DOUBLE: return literal{{shape::double_type, dims}, s.data()};
            case onnx::TensorProto::UINT32: throw std::runtime_error("");
            case onnx::TensorProto::UINT64: throw std::runtime_error("");
            case onnx::TensorProto::COMPLEX64: throw std::runtime_error("");
            case onnx::TensorProto::COMPLEX128: throw std::runtime_error("");
            }
            MIGRAPHX_THROW("Invalid tensor type");
        }
        switch(t.data_type())
        {
        case onnx::TensorProto::UNDEFINED: throw std::runtime_error("");
        case onnx::TensorProto::FLOAT:
            return literal{{shape::float_type, dims}, t.float_data().begin(), t.float_data().end()};
        case onnx::TensorProto::UINT8: throw std::runtime_error("");
        case onnx::TensorProto::INT8:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::UINT16:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT16:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT32:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT64:
            return literal{{shape::int64_type, dims}, t.int64_data().begin(), t.int64_data().end()};
        case onnx::TensorProto::STRING: throw std::runtime_error("");
        case onnx::TensorProto::BOOL:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::FLOAT16:
            return literal{{shape::half_type, dims}, t.float_data().begin(), t.float_data().end()};
        case onnx::TensorProto::DOUBLE:
            return literal{
                {shape::double_type, dims}, t.double_data().begin(), t.double_data().end()};
        case onnx::TensorProto::UINT32: throw std::runtime_error("");
        case onnx::TensorProto::UINT64: throw std::runtime_error("");
        case onnx::TensorProto::COMPLEX64: throw std::runtime_error("");
        case onnx::TensorProto::COMPLEX128: throw std::runtime_error("");
        }
        MIGRAPHX_THROW("Invalid tensor type");
    }

    static shape parse_type(const onnx::TypeProto& t)
    {
        shape::type_t shape_type{};
        switch(t.tensor_type().elem_type())
        {
        case onnx::TensorProto::UNDEFINED:
            break; // throw std::runtime_error("Unsupported type UNDEFINED");
        case onnx::TensorProto::FLOAT: shape_type = shape::float_type; break;
        case onnx::TensorProto::UINT8:
            break; // throw std::runtime_error("Unsupported type UINT8");
        case onnx::TensorProto::INT8: shape_type = shape::int8_type; break;
        case onnx::TensorProto::UINT16: shape_type = shape::uint16_type; break;
        case onnx::TensorProto::INT16: shape_type = shape::int16_type; break;
        case onnx::TensorProto::INT32: shape_type = shape::int32_type; break;
        case onnx::TensorProto::INT64: shape_type = shape::int64_type; break;
        case onnx::TensorProto::STRING:
            break; // throw std::runtime_error("Unsupported type STRING");
        case onnx::TensorProto::BOOL:
            break; // throw std::runtime_error("Unsupported type BOOL");
        case onnx::TensorProto::FLOAT16: shape_type = shape::half_type; break;
        case onnx::TensorProto::DOUBLE: shape_type = shape::double_type; break;
        case onnx::TensorProto::UINT32: shape_type = shape::uint32_type; break;
        case onnx::TensorProto::UINT64: shape_type = shape::uint64_type; break;
        case onnx::TensorProto::COMPLEX64:
            break; // throw std::runtime_error("Unsupported type COMPLEX64");
        case onnx::TensorProto::COMPLEX128:
            break; // throw std::runtime_error("Unsupported type COMPLEX128");
        }
        std::vector<std::size_t> dims;
        auto&& tensor_dims = t.tensor_type().shape().dim();
        std::transform(tensor_dims.begin(),
                       tensor_dims.end(),
                       std::back_inserter(dims),
                       [](auto&& d) -> std::size_t {
                           if(not d.has_dim_value())
                           {
                               long default_batch_size = 1; // FIXME
                               return default_batch_size;
                           }
                           return d.dim_value();
                       });
        return {shape_type, dims};
    }
};

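// Minimal usage sketch (the model filename below is illustrative, not part of this file):
//   migraphx::program p = migraphx::parse_onnx("model.onnx");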
program parse_onnx(const std::string& name)
{
    std::fstream input(name.c_str(), std::ios::in | std::ios::binary);
    onnx_parser parser;
#ifndef NDEBUG
    // Log the program when it can't be parsed
    try
    {
        parser.parse_from(input);
    }
    catch(...)
    {
        std::cerr << parser.prog << std::endl;
        throw;
    }
#else
    parser.parse_from(input);
#endif
    return std::move(parser.prog);
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx