#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <onnx.pb.h>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <functional>
#include <array>
#include <utility>
#include <vector>
#include <algorithm>
#include <numeric>

#include <migraphx/fallthrough.hpp>
#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/config.hpp>
#include <migraphx/onnx.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

struct onnx_parser
{
    using attribute_map = std::unordered_map<std::string, onnx::AttributeProto>;
    using node_map      = std::unordered_map<std::string, onnx::NodeProto>;
    using op_func = std::function<instruction_ref(attribute_map, std::vector<instruction_ref>)>;

    node_map nodes;
    std::unordered_map<std::string, instruction_ref> instructions;
    program prog    = program();
    bool is_pytorch = false;

    std::unordered_map<std::string, op_func> ops;

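    // The constructor builds the op dispatch table: each ONNX op type name is mapped
    // to a callback that lowers the node into migraphx instructions. Simple
    // elementwise ops go through add_generic_op/add_binary_op/add_variadic_op, while
    // ops that need attribute handling get a dedicated parse_* member via add_mem_op.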
    onnx_parser()
    {
        add_generic_op("MatMul", op::dot{});
Khalique's avatar
Khalique committed
38
        add_generic_op("Relu", op::relu{});
Khalique's avatar
Khalique committed
39
40
        add_generic_op("Sigmoid", op::sigmoid{});
        add_generic_op("Abs", op::abs{});
Shucai Xiao's avatar
Shucai Xiao committed
41
42
        add_generic_op("Exp", op::exp{});
        add_generic_op("Log", op::log{});
Khalique's avatar
Khalique committed
43
44
        // disable dropout for inference
        add_generic_op("Dropout", op::identity{});
Khalique's avatar
Khalique committed
45
        add_generic_op("Identity", op::identity{});
Shucai Xiao's avatar
Shucai Xiao committed
46
47
48
        add_generic_op("Sin", op::sin{});
        add_generic_op("Cos", op::cos{});
        add_generic_op("Tan", op::tan{});
49
50
        add_generic_op("Sinh", op::sinh{});
        add_generic_op("Cosh", op::cosh{});
51
        add_generic_op("Tanh", op::tanh{});
52
53
54
        add_generic_op("Asin", op::asin{});
        add_generic_op("Acos", op::acos{});
        add_generic_op("Atan", op::atan{});
Paul's avatar
Paul committed
55

Khalique's avatar
Khalique committed
56
57
58
59
60
        add_binary_op("Add", op::add{});
        add_binary_op("Div", op::div{});
        add_binary_op("Mul", op::mul{});
        add_binary_op("Sub", op::sub{});

Khalique's avatar
Khalique committed
61
62
63
        add_variadic_op("Sum", op::add{});
        add_variadic_op("Max", op::max{});
        add_variadic_op("Min", op::min{});
Paul's avatar
Paul committed
64

Khalique's avatar
Khalique committed
65
        add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
66
        add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
Khalique's avatar
Khalique committed
67
        add_mem_op("Elu", &onnx_parser::parse_elu);
Paul's avatar
Paul committed
68
69
        add_mem_op("Constant", &onnx_parser::parse_constant);
        add_mem_op("Conv", &onnx_parser::parse_conv);
Paul's avatar
Paul committed
70
71
        add_mem_op("MaxPool", &onnx_parser::parse_pooling);
        add_mem_op("AveragePool", &onnx_parser::parse_pooling);
72
73
        add_mem_op("GlobalMaxPool", &onnx_parser::parse_pooling);
        add_mem_op("GlobalAveragePool", &onnx_parser::parse_pooling);
Paul's avatar
Paul committed
74
        add_mem_op("Reshape", &onnx_parser::parse_reshape);
Paul's avatar
Paul committed
75
76
        add_mem_op("Flatten", &onnx_parser::parse_flatten);
        add_mem_op("Gemm", &onnx_parser::parse_gemm);
77
        add_mem_op("BatchNormalization", &onnx_parser::parse_batchnorm);
Paul's avatar
Paul committed
78
        add_mem_op("Softmax", &onnx_parser::parse_softmax);
79
80
81
        add_mem_op("Squeeze", &onnx_parser::parse_squeeze);
        add_mem_op("Unsqueeze", &onnx_parser::parse_unsqueeze);
        add_mem_op("Slice", &onnx_parser::parse_slice);
Scott Thornton's avatar
Scott Thornton committed
82
        add_mem_op("Concat", &onnx_parser::parse_concat);
83
84
85
        add_mem_op("Gather", &onnx_parser::parse_gather);
        add_mem_op("Shape", &onnx_parser::parse_shape);
        add_mem_op("ConstantFill", &onnx_parser::parse_constant_fill);
Khalique's avatar
Khalique committed
86
        add_mem_op("Transpose", &onnx_parser::parse_transpose);
Khalique's avatar
Khalique committed
87
        add_mem_op("Pad", &onnx_parser::parse_pad);
Paul's avatar
Paul committed
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
    }

    template <class F>
    void add_op(std::string name, F f)
    {
        ops.emplace(name, f);
    }

    template <class F>
    void add_mem_op(std::string name, F f)
    {
        ops.emplace(name, [=](auto&&... xs) {
            return std::mem_fn(f)(*this, name, std::forward<decltype(xs)>(xs)...);
        });
    }

    template <class T>
    void add_binary_op(std::string name, T x)
    {
        ops.emplace(name, [this, x](attribute_map attributes, std::vector<instruction_ref> args) {
            if(args.size() != 2)
                MIGRAPHX_THROW("binary operators should have 2 operands");
            if(contains(attributes, "broadcast") and contains(attributes, "axis"))
            {
                uint64_t broadcasted = parse_value(attributes.at("broadcast")).at<uint64_t>();
                if(broadcasted != 0)
                {
                    uint64_t axis = parse_value(attributes.at("axis")).at<uint64_t>();
                    auto l =
                        prog.add_instruction(op::broadcast{axis, args[0]->get_shape()}, args[1]);
                    return prog.add_instruction(x, args[0], l);
                }
                return prog.add_instruction(x, args);
            }
            else
            {
                return add_broadcastable_binary_op(args[0], args[1], x);
            }
        });
    }

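    // Broadcast two arguments to a common shape (when they differ) before applying a
    // binary op, following numpy/ONNX multidirectional broadcasting; see the worked
    // examples in the function body below.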
    template <class T>
    instruction_ref add_broadcastable_binary_op(instruction_ref arg0, instruction_ref arg1, T x)
    {
        if(arg0->get_shape() != arg1->get_shape())
        {
            // Example:
            // s0 = (3,2,4,5) and s1 = (2,1,1)
            //
            // In this case we need to broadcast (:,1,1) portion of
            // s1 plus broadcast the 1st dimension of s1
            // giving output_lens = (3,2,4,5)
            //
            // Another example:
            // s0 = (3,2,1,5) and s1 = (2,7,5)
            // In this case we need to broadcast the (:,:,1:,:) axis
            // of s0 plus the 1st dimension of s1 giving
            // output_lens = (3,2,7,5)
            //
            // Get lengths for both arguments
            const std::vector<std::size_t>* s0 = &arg0->get_shape().lens();
            const std::vector<std::size_t>* s1 = &arg1->get_shape().lens();

            // Make sure s0 is the smaller size
            if(s0->size() > s1->size())
                std::swap(s0, s1);

            std::vector<std::size_t> output_lens(*s1);
            auto offset = s1->size() - s0->size();
            std::transform(s0->begin(),
                           s0->end(),
                           s1->begin() + offset,
                           output_lens.begin() + offset,
                           [](auto a, auto b) { return std::max(a, b); });

            auto l0 = prog.add_instruction(op::multibroadcast{output_lens}, arg0);
            auto l1 = prog.add_instruction(op::multibroadcast{output_lens}, arg1);
            return prog.add_instruction(x, l0, l1);
        }
        else
        {
            return prog.add_instruction(x, {arg0, arg1});
        }
    }

    template <class T>
    void add_generic_op(std::string name, T x)
    {
        ops.emplace(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
            return prog.add_instruction(x, args);
        });
    }

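    // Variadic ops (Sum, Max, Min) are lowered as a left fold of the corresponding
    // binary op over all inputs, e.g. Sum(a, b, c) -> add(add(a, b), c), with
    // broadcasting applied to each pair as needed.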
    template <class T>
    void add_variadic_op(std::string name, T x)
    {
        ops.emplace(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
            return std::accumulate(std::next(args.begin()),
                                   args.end(),
                                   args.front(),
                                   [this, x](instruction_ref a, instruction_ref b) {
                                       return add_broadcastable_binary_op(a, b, x);
                                   });
        });
    }

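    // Softmax here assumes a 2-D (batch, features) input: it is reshaped to
    // (N, C, 1, 1), run through the 4-D softmax operator, then reshaped back.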
    instruction_ref
    parse_softmax(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
        auto dims = args.front()->get_shape().lens();
        auto r =
            prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1]), 1, 1}}, args.front());
        auto s = prog.add_instruction(op::softmax{}, r);
        return prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1])}}, s);
    }

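    // Conv: ONNX "pads" come as [top, left, bottom, right]; only symmetric padding is
    // supported, so pads[0]/pads[1] map directly onto op.padding. An optional third
    // input is the bias, which is added after the convolution by broadcasting it
    // along the channel axis.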
    instruction_ref
    parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::convolution op;
        if(contains(attributes, "pads"))
        {
            if(contains(attributes, "auto_pad"))
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }
            std::vector<std::size_t> padding(4);
            copy(attributes["pads"].ints(), padding.begin());
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                MIGRAPHX_THROW("migraphx does not support asymmetric padding");
            }
            op.padding[0] = padding[0];
            op.padding[1] = padding[1];
        }
        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
        }
        if(contains(attributes, "dilations"))
        {
            copy(attributes["dilations"].ints(), op.dilation.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(contains(attributes, "pads") and to_upper(s) != "NOTSET")
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }

            if(s.find("SAME") != std::string::npos)
            {
                op.padding_mode = op::convolution::same;
            }
        }
        if(contains(attributes, "group"))
        {
            op.group = parse_value(attributes.at("group")).at<int>();
        }
        if(args.size() == 3)
        {
            uint64_t axis = 1;
            auto l1       = prog.add_instruction(op, args[0], args[1]);
            auto l2       = prog.add_instruction(op::broadcast{axis, l1->get_shape()}, args[2]);
            return prog.add_instruction(op::add{}, l1, l2);
        }
        return prog.add_instruction(op, args);
    }

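    // MaxPool/AveragePool and their Global* variants share this parser. For global
    // pooling the window is simply the full spatial extent of the input (dims 2, 3).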
    instruction_ref parse_pooling(const std::string& name,
                                  attribute_map attributes,
                                  std::vector<instruction_ref> args)
    {
        op::pooling op{ends_with(name, "MaxPool") ? "max" : "average"};
        if(starts_with(name, "Global"))
        {
            auto lens  = args.front()->get_shape().lens();
            op.lengths = {lens[2], lens[3]};
        }
        if(contains(attributes, "pads"))
        {
            std::vector<std::size_t> padding(4);
            copy(attributes["pads"].ints(), padding.begin());
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                MIGRAPHX_THROW("migraphx does not support asymmetric padding");
            }
            op.padding[0] = padding[0];
            op.padding[1] = padding[1];
        }
        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
        }
        if(contains(attributes, "kernel_shape"))
        {
            copy(attributes["kernel_shape"].ints(), op.lengths.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(to_upper(s) != "NOTSET")
            {
                MIGRAPHX_THROW("auto_pad is not supported for pooling");
            }
        }

        return prog.add_instruction(op, std::move(args));
    }

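    // Reshape: older opsets pass the target shape as a "shape" attribute, newer
    // opsets pass it as a second (constant) input; both forms are handled here.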
    instruction_ref
    parse_reshape(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::reshape op;
        if(args.size() == 1)
        {
            literal s = parse_value(attributes.at("shape"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
        }
        if(args.size() == 2)
        {
            literal s = args[1]->get_literal();
            s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
        }
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_flatten(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        uint64_t axis = 1;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }
        return prog.add_instruction(op::flatten{axis}, args[0]);
    }

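    // Squeeze/Unsqueeze: the "axes" attribute lists the dimensions to remove or
    // insert.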
    instruction_ref
    parse_squeeze(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::squeeze op;
        literal s = parse_value(attributes.at("axes"));
        s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_unsqueeze(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::unsqueeze op;
        literal s = parse_value(attributes.at("axes"));
        s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_concat(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::size_t axis = parse_value(attributes.at("axis")).at<int>();
        op::concat op{axis};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref
    parse_gather(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::size_t axis = 0;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }
        op::gather op{axis};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref
    parse_slice(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::slice op;
        if(contains(attributes, "axes"))
        {
            literal s = parse_value(attributes.at("axes"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        }
        {
            literal s = parse_value(attributes.at("ends"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.ends)); });
        }
        {
            literal s = parse_value(attributes.at("starts"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.starts)); });
        }
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref parse_constant(const std::string&,
                                   attribute_map attributes,
                                   const std::vector<instruction_ref>&)
    {
        literal v = parse_value(attributes.at("value"));
        return prog.add_literal(v);
    }

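    // Gemm computes alpha * op(A) * op(B) + beta * C, where op() is an optional
    // transpose selected by transA/transB. The beta * C term is added with
    // broadcasting; a scalar C shape is treated as "no C term".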
    instruction_ref
    parse_gemm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 1.0f;
        float beta  = 1.0f;
        bool transa = false;
        bool transb = false;
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        if(contains(attributes, "beta"))
        {
            beta = parse_value(attributes.at("beta")).at<float>();
        }
        if(contains(attributes, "transA"))
        {
            transa = parse_value(attributes.at("transA")).at<bool>();
        }
        if(contains(attributes, "transB"))
        {
            transb = parse_value(attributes.at("transB")).at<bool>();
        }
        std::vector<int64_t> perm = {1, 0};
        auto l1 = (transa) ? prog.add_instruction(op::transpose{perm}, args[0]) : args[0];
        auto l2 = (transb) ? prog.add_instruction(op::transpose{perm}, args[1]) : args[1];
        if(args.size() == 3)
        {
            if(beta != 0.f)
            {
                auto l3 = prog.add_instruction(op::dot{alpha}, l1, l2);
                auto l4 = args[2];
                if(l4->get_shape().scalar()) // ignore args[2] (no C value added to alpha*A*B)
                    return l3;
                if(beta != 1.f)
                {
                    auto beta_val = prog.add_literal(beta);
                    auto l5 = prog.add_instruction(op::scalar{args[2]->get_shape()}, beta_val);
                    l4      = prog.add_instruction(op::mul{}, args[2], l5);
                }
                return add_broadcastable_binary_op(l3, l4, op::add{});
            }
        }
        return prog.add_instruction(op::dot{alpha, beta}, l1, l2);
    }

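    // BatchNormalization is always lowered to inference-mode batch norm: the running
    // mean/variance come in as inputs, and the is_test attribute is read only for
    // compatibility.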
    instruction_ref
    parse_batchnorm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float epsilon                                     = 1e-5f;
        float momentum                                    = 0.9f;
        op::batch_norm_inference::bn_infer_mode_t bn_mode = op::batch_norm_inference::spatial;
        bool is_test                                      = false;
        if(contains(attributes, "epsilon"))
        {
            epsilon = parse_value(attributes.at("epsilon")).at<float>();
        }
        if(contains(attributes, "momentum"))
        {
            momentum = parse_value(attributes.at("momentum")).at<float>();
        }
        if(contains(attributes, "is_test"))
        {
            is_test = parse_value(attributes.at("is_test")).at<uint64_t>() > 0;
        }
        if(contains(attributes, "spatial"))
        {
            bn_mode = (parse_value(attributes.at("spatial")).at<uint64_t>() > 0)
                          ? op::batch_norm_inference::spatial
                          : op::batch_norm_inference::per_activation;
        }
        (void)is_test;
        op::batch_norm_inference op{epsilon, momentum, bn_mode};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref parse_leaky_relu(const std::string&,
                                     attribute_map attributes,
                                     std::vector<instruction_ref> args)
    {
        float alpha = 0.01; // default alpha val for leaky relu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::leaky_relu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    instruction_ref
    parse_elu(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 1.0; // default alpha val for elu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::elu op{alpha};
        return prog.add_instruction(op, args.front());
    }

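    // ImageScaler: out = scale * input + bias, with the per-channel bias broadcast
    // along axis 1 of the NCHW input.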
    instruction_ref parse_imagescaler(const std::string&,
                                      attribute_map attributes,
                                      std::vector<instruction_ref> args)
    {
        float scale = 1.0;
        std::vector<float> bias{};
        if(contains(attributes, "scale"))
        {
            scale = parse_value(attributes.at("scale")).at<float>();
        }

        if(contains(attributes, "bias"))
        {
            auto&& bias_floats = attributes["bias"].floats();
            bias               = std::vector<float>(bias_floats.begin(), bias_floats.end());
        }
        auto input_shape = args.front()->get_shape();

        auto scale_val = prog.add_literal(scale);
        auto bias_vals = prog.add_literal(
            migraphx::literal{migraphx::shape{migraphx::shape::float_type, {bias.size()}}, bias});

        auto scale_tensor = prog.add_instruction(migraphx::op::scalar{input_shape}, scale_val);
        auto img_scaled   = prog.add_instruction(migraphx::op::mul{}, args.front(), scale_tensor);
        auto bias_bcast = prog.add_instruction(migraphx::op::broadcast{1, input_shape}, bias_vals);
        return prog.add_instruction(migraphx::op::add{}, img_scaled, bias_bcast);
    }

    instruction_ref
    parse_transpose(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::vector<int64_t> perm{};
        if(contains(attributes, "perm"))
        {
            auto&& perm_vals = attributes["perm"].ints();
            perm             = std::vector<int64_t>(perm_vals.begin(), perm_vals.end());
        }
        return prog.add_instruction(migraphx::op::transpose{perm}, args.front());
    }

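    // Pad: "pads" lists the before/after padding for every axis; only constant-mode
    // padding with a single fill value is supported.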
    instruction_ref
    parse_pad(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::vector<int64_t> pads{};
        float value = 0.0f;
        if(contains(attributes, "pads"))
        {
            auto&& pad_vals = attributes["pads"].ints();
            pads            = std::vector<int64_t>(pad_vals.begin(), pad_vals.end());
        }
        if(contains(attributes, "value"))
        {
            value = parse_value(attributes.at("value")).at<float>();
        }
        if(contains(attributes, "mode"))
        {
            auto mode = attributes.at("mode").s();
            if(mode != "constant")
                MIGRAPHX_THROW("migraphx currently only supports constant padding");
        }
        return prog.add_instruction(migraphx::op::pad{pads, value}, args.front());
    }
    // Use a literal instruction to replace Shape, since the output of the shape
    // operator is a literal in migraphx
    instruction_ref
    parse_shape(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
        if(args.size() != 1)
            MIGRAPHX_THROW("Shape: operator should have 1 operand");
        std::vector<std::size_t> arg_shape = args[0]->get_shape().lens();
        std::vector<int64_t> vec_shape(arg_shape.size());
        migraphx::shape s(migraphx::shape::int64_type, {arg_shape.size()});
        std::transform(arg_shape.begin(), arg_shape.end(), vec_shape.begin(), [](auto i) {
            return int64_t(i);
        });
        return prog.add_literal(migraphx::literal{s, vec_shape});
    }

    // Use a literal instruction to replace the ConstantFill operator. In RNNs, the
    // input shape and fill value are fixed, so there is no need to perform the actual
    // computation for the ConstantFill operator.
    instruction_ref parse_constant_fill(const std::string&,
                                        attribute_map attributes,
                                        std::vector<instruction_ref> args)
    {
        int input_as_shape = 0;
        int dtype          = 1;
        float value        = 0.0f;

        if(contains(attributes, "dtype"))
        {
            dtype = parse_value(attributes.at("dtype")).at<int>();
        }
        migraphx::shape::type_t type = get_type(dtype);

        if(contains(attributes, "input_as_shape"))
        {
            input_as_shape = parse_value(attributes.at("input_as_shape")).at<int>();
        }

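        // Two ways of determining the output shape: input_as_shape == 1 reads it from
        // the (constant) input tensor, while input_as_shape == 0 reads it from the
        // "shape" attribute.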
        if(contains(attributes, "extra_shape"))
        {
            MIGRAPHX_THROW("ConstantFill: cannot handle extra shape attribute");
        }

        if(input_as_shape == 1)
        {
            if(args.size() != 1)
            {
                MIGRAPHX_THROW("ConstantFill: need an input argument as output shape");
            }

            if(contains(attributes, "shape"))
            {
                MIGRAPHX_THROW("ConstantFill: cannot set the shape argument and pass in an input "
                               "at the same time");
            }

            migraphx::argument in = args[0]->eval();
            if(in.empty())
            {
                MIGRAPHX_THROW("ConstantFill: cannot handle dynamic shape as input");
            }

            std::vector<std::size_t> dims;
            in.visit([&](auto input) { dims.assign(input.begin(), input.end()); });
            migraphx::shape s(type, dims);
            std::vector<float> values(s.elements(), value);
            return prog.add_literal(migraphx::literal(s, values));
        }
        else if(input_as_shape == 0)
        {
            if(!contains(attributes, "shape"))
            {
                MIGRAPHX_THROW("ConstantFill: attribute output shape is needed");
            }

            literal ls = parse_value(attributes.at("shape"));
            std::vector<std::size_t> dims;
            ls.visit([&](auto s) { dims.assign(s.begin(), s.end()); });
            migraphx::shape s{type, dims};
            std::vector<float> values(s.elements(), value);
            return prog.add_literal(migraphx::literal(s, values));
        }
        else
        {
            MIGRAPHX_THROW("ConstantFill: wrong value of attribute input_as_shape");
        }
    }

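    // Parsing entry point: deserialize the ModelProto from a stream, then walk its
    // graph, turning initializers into literals, remaining graph inputs into
    // parameters, and nodes into instructions.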
    void parse_from(std::istream& is)
    {
        onnx::ModelProto model;
        if(model.ParseFromIstream(&is))
        {
            if(model.has_graph())
            {
                this->parse_graph(model.graph());
            }
        }
        else
        {
            throw std::runtime_error("Failed reading");
        }
    }

    void parse_graph(const onnx::GraphProto& graph)
    {
        nodes = get_nodes(graph);
        std::unordered_map<std::string, onnx::TensorProto> initializer_data;
        for(auto&& f : graph.initializer())
        {
            initializer_data[f.name()] = f;
        }
        for(auto&& input : graph.input())
        {
            const std::string& name = input.name();
            // Does the input have an initializer?
            if(contains(initializer_data, name))
            {
                auto t             = initializer_data[name];
                instructions[name] = prog.add_literal(parse_tensor(t));
            }
            else
            {
                // TODO: Get shape of input parameter
                shape s            = parse_type(input.type());
                instructions[name] = prog.add_parameter(name, s);
            }
        }
        for(auto&& p : nodes)
        {
            this->parse_node(get_name(p.second));
        }
    }

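    // Nodes are parsed on demand: inputs produced by other nodes are parsed first
    // (recursively), then the callback registered for the op type is invoked.
    // Unrecognised op types become a placeholder "unknown" instruction.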
    void parse_node(const std::string& name)
    {
        if(name.empty())
            MIGRAPHX_THROW("Onnx node must have a name");
        if(instructions.count(name) == 0)
        {
            auto&& node = nodes.at(name);
            std::vector<instruction_ref> args;
            for(auto&& input : node.input())
            {
                if(nodes.count(input) > 0)
                {
                    auto&& iname = get_name(nodes.at(input));
                    assert(name != iname);
                    this->parse_node(iname);
                    args.push_back(instructions.at(iname));
                }
                else
                {
                    args.push_back(instructions.at(input));
                }
            }
            if(ops.count(node.op_type()) == 0)
            {
                instructions[name] = prog.add_instruction(unknown{node.op_type()}, args);
            }
            else
            {
                instructions[name] = ops[node.op_type()](get_attributes(node), args);
            }
        }
    }

    static attribute_map get_attributes(const onnx::NodeProto& node)
    {
        std::unordered_map<std::string, onnx::AttributeProto> result;
        for(auto&& attr : node.attribute())
        {
            result[attr.name()] = attr;
        }
        return result;
    }

    static std::string get_name(const onnx::NodeProto& node)
    {
        if(node.name().empty())
        {
            std::string generated = "migraphx_unnamed_node";
            return std::accumulate(node.output().begin(),
                                   node.output().end(),
                                   generated,
                                   [](auto x, auto y) { return x + "_" + y; });
        }
        return node.name();
    }

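    // Index nodes both under their own name and under each of their output names, so
    // a producer can be looked up from either when resolving inputs.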
    static node_map get_nodes(const onnx::GraphProto& graph)
    {
        std::unordered_map<std::string, onnx::NodeProto> result;
        for(auto&& node : graph.node())
        {
            result[get_name(node)] = node;
            for(auto&& output : node.output())
            {
                result[output] = node;
            }
        }
        return result;
    }

    template <class T>
    static literal from_repeated(shape::type_t t, const T& r)
    {
        std::size_t size = r.size();
        return literal{{t, {size}}, r.begin(), r.end()};
    }

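    // Convert a single ONNX attribute into a migraphx literal. Attribute kinds with
    // no literal equivalent (strings, graphs, tensor lists) yield an empty literal.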
    static literal parse_value(const onnx::AttributeProto& attr)
    {
        switch(attr.type())
        {
        case onnx::AttributeProto::UNDEFINED: return {};
        case onnx::AttributeProto::FLOAT: return literal{attr.f()};
        case onnx::AttributeProto::INT: return literal{attr.i()};
        case onnx::AttributeProto::STRING: return {};
        case onnx::AttributeProto::TENSOR: return parse_tensor(attr.t());
        case onnx::AttributeProto::GRAPH: return {};
        case onnx::AttributeProto::FLOATS: return from_repeated(shape::float_type, attr.floats());
        case onnx::AttributeProto::INTS: return from_repeated(shape::int64_type, attr.ints());
        case onnx::AttributeProto::STRINGS: return {};
        case onnx::AttributeProto::TENSORS: return {};
        case onnx::AttributeProto::GRAPHS: return {};
        }
        MIGRAPHX_THROW("Invalid attribute type");
    }

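    // Convert a TensorProto initializer into a migraphx literal. The data may live
    // either in raw_data or in the typed repeated fields; integer types narrower than
    // 32 bits are widened to int32.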
    static literal parse_tensor(const onnx::TensorProto& t)
    {
        std::vector<std::size_t> dims(t.dims().begin(), t.dims().end());
        // in case of scalar constants in onnx file, use dims=1 to fill initializer data
        if(dims.empty())
        {
            dims = {1};
        }
        if(t.has_raw_data())
        {
            const std::string& s = t.raw_data();
            switch(t.data_type())
            {
            case onnx::TensorProto::UNDEFINED: throw std::runtime_error("");
            case onnx::TensorProto::FLOAT: return literal{{shape::float_type, dims}, s.data()};
            case onnx::TensorProto::UINT8: throw std::runtime_error("");
            case onnx::TensorProto::INT8: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::UINT16: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT16: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT32: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT64: return literal{{shape::int64_type, dims}, s.data()};
            case onnx::TensorProto::STRING: throw std::runtime_error("");
            case onnx::TensorProto::BOOL: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::FLOAT16: return literal{{shape::half_type, dims}, s.data()};
            case onnx::TensorProto::DOUBLE: return literal{{shape::double_type, dims}, s.data()};
            case onnx::TensorProto::UINT32: throw std::runtime_error("");
            case onnx::TensorProto::UINT64: throw std::runtime_error("");
            case onnx::TensorProto::COMPLEX64: throw std::runtime_error("");
            case onnx::TensorProto::COMPLEX128: throw std::runtime_error("");
            }
            MIGRAPHX_THROW("Invalid tensor type");
        }
        switch(t.data_type())
        {
        case onnx::TensorProto::UNDEFINED: throw std::runtime_error("");
        case onnx::TensorProto::FLOAT:
            return literal{{shape::float_type, dims}, t.float_data().begin(), t.float_data().end()};
        case onnx::TensorProto::UINT8: throw std::runtime_error("");
        case onnx::TensorProto::INT8:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::UINT16:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT16:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT32:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT64:
            return literal{{shape::int64_type, dims}, t.int64_data().begin(), t.int64_data().end()};
        case onnx::TensorProto::STRING: throw std::runtime_error("");
        case onnx::TensorProto::BOOL:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::FLOAT16:
            return literal{{shape::half_type, dims}, t.float_data().begin(), t.float_data().end()};
        case onnx::TensorProto::DOUBLE:
            return literal{
                {shape::double_type, dims}, t.double_data().begin(), t.double_data().end()};
        case onnx::TensorProto::UINT32: throw std::runtime_error("");
        case onnx::TensorProto::UINT64: throw std::runtime_error("");
        case onnx::TensorProto::COMPLEX64: throw std::runtime_error("");
        case onnx::TensorProto::COMPLEX128: throw std::runtime_error("");
        }
        MIGRAPHX_THROW("Invalid tensor type");
    }

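    // Map an ONNX TypeProto to a migraphx shape. Dimensions without a concrete value
    // (e.g. a symbolic batch dimension) fall back to 1.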
    static shape parse_type(const onnx::TypeProto& t)
    {
        shape::type_t shape_type{};
        switch(t.tensor_type().elem_type())
        {
        case onnx::TensorProto::UNDEFINED:
            break; // throw std::runtime_error("Unsupported type UNDEFINED");
        case onnx::TensorProto::FLOAT: shape_type = shape::float_type; break;
        case onnx::TensorProto::UINT8:
            break; // throw std::runtime_error("Unsupported type UINT8");
        case onnx::TensorProto::INT8: shape_type = shape::int8_type; break;
        case onnx::TensorProto::UINT16: shape_type = shape::uint16_type; break;
        case onnx::TensorProto::INT16: shape_type = shape::int16_type; break;
        case onnx::TensorProto::INT32: shape_type = shape::int32_type; break;
        case onnx::TensorProto::INT64: shape_type = shape::int64_type; break;
        case onnx::TensorProto::STRING:
            break; // throw std::runtime_error("Unsupported type STRING");
        case onnx::TensorProto::BOOL:
            break; // throw std::runtime_error("Unsupported type BOOL");
        case onnx::TensorProto::FLOAT16: shape_type = shape::half_type; break;
        case onnx::TensorProto::DOUBLE: shape_type = shape::double_type; break;
        case onnx::TensorProto::UINT32: shape_type = shape::uint32_type; break;
        case onnx::TensorProto::UINT64: shape_type = shape::uint64_type; break;
        case onnx::TensorProto::COMPLEX64:
            break; // throw std::runtime_error("Unsupported type COMPLEX64");
        case onnx::TensorProto::COMPLEX128:
            break; // throw std::runtime_error("Unsupported type COMPLEX128");
        }
        std::vector<std::size_t> dims;
        auto&& tensor_dims = t.tensor_type().shape().dim();
        std::transform(tensor_dims.begin(),
                       tensor_dims.end(),
                       std::back_inserter(dims),
                       [](auto&& d) -> std::size_t {
                           if(not d.has_dim_value())
                           {
                               long default_batch_size = 1; // FIXME
                               return default_batch_size;
                           }
                           return d.dim_value();
                       });
        return {shape_type, dims};
    }

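    // Map an ONNX TensorProto::DataType code (as carried in integer attributes such
    // as ConstantFill's "dtype") to a migraphx shape type.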
    shape::type_t get_type(int dtype)
    {
        switch(dtype)
        {
        case 1: return shape::float_type;
        case 2: return shape::uint8_type;
        case 3: return shape::int8_type;
        case 4: return shape::uint16_type;
        case 5: return shape::int16_type;
        case 6: return shape::int32_type;
        case 7: return shape::int64_type;
        case 10: return shape::half_type;
        case 11: return shape::double_type;
        case 12: return shape::uint32_type;
        case 13: return shape::uint64_type;
        default:
        {
            MIGRAPHX_THROW("Prototensor data type " + std::to_string(dtype) + " not supported");
        }
        }
    }
Paul's avatar
Paul committed
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
};

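// Public entry point: parse an ONNX protobuf file into a migraphx program, e.g.
//   migraphx::program p = migraphx::parse_onnx("model.onnx"); // file name illustrative
// In debug builds the partially built program is printed when parsing fails.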
program parse_onnx(const std::string& name)
{
    std::fstream input(name.c_str(), std::ios::in | std::ios::binary);
    onnx_parser parser;
#ifndef NDEBUG
    // Log the program when it can't be parsed
    try
    {
        parser.parse_from(input);
    }
    catch(...)
    {
        std::cerr << parser.prog << std::endl;
        throw;
    }
#else
    parser.parse_from(input);
#endif
    return std::move(parser.prog);
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx