#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <onnx.pb.h>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <functional>
#include <array>
#include <algorithm>
#include <numeric>
#include <utility>
#include <vector>

#include <migraphx/fallthrough.hpp>
#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/config.hpp>
#include <migraphx/onnx.hpp>
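
// Translates an ONNX model (onnx::ModelProto) into a migraphx::program.
// Supported ONNX operators are registered in a name -> handler table;
// nodes whose op type has no registered handler are recorded with a
// placeholder "unknown" operation.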

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

struct onnx_parser
{
    using attribute_map = std::unordered_map<std::string, onnx::AttributeProto>;
    using node_map      = std::unordered_map<std::string, onnx::NodeProto>;
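
    // Each registered handler receives the node's attributes and the
    // already-translated input instructions, and returns the instruction(s)
    // it appended to the program.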
    using op_func =
        std::function<std::vector<instruction_ref>(attribute_map, std::vector<instruction_ref>)>;
    node_map nodes;
    std::unordered_map<std::string, instruction_ref> instructions;
    program prog    = program();
    bool is_pytorch = false;

    std::unordered_map<std::string, op_func> ops;
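
    // Activation operators looked up by name when parsing the
    // "activations" attribute of RNN/GRU/LSTM nodes.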
    std::unordered_map<std::string, operation> map_actv_funcs;

    onnx_parser()
    {
        add_generic_op("MatMul", op::dot{});
        add_generic_op("Relu", op::relu{});
        add_generic_op("Sigmoid", op::sigmoid{});
        add_generic_op("Abs", op::abs{});
        add_generic_op("Exp", op::exp{});
        add_generic_op("Log", op::log{});
        // disable dropout for inference
        add_generic_op("Dropout", op::identity{});
        add_generic_op("Identity", op::identity{});
        add_generic_op("Sin", op::sin{});
        add_generic_op("Cos", op::cos{});
        add_generic_op("Tan", op::tan{});
        add_generic_op("Sinh", op::sinh{});
        add_generic_op("Cosh", op::cosh{});
        add_generic_op("Tanh", op::tanh{});
        add_generic_op("Asin", op::asin{});
        add_generic_op("Acos", op::acos{});
        add_generic_op("Atan", op::atan{});

        add_binary_op("Add", op::add{});
        add_binary_op("Div", op::div{});
        add_binary_op("Mul", op::mul{});
        add_binary_op("Sub", op::sub{});

        add_variadic_op("Sum", op::add{});
        add_variadic_op("Max", op::max{});
        add_variadic_op("Min", op::min{});

        add_mem_op("LRN", &onnx_parser::parse_lrn);
        add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
        add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
        add_mem_op("Elu", &onnx_parser::parse_elu);
        add_mem_op("Constant", &onnx_parser::parse_constant);
        add_mem_op("Conv", &onnx_parser::parse_conv);
        add_mem_op("MaxPool", &onnx_parser::parse_pooling);
        add_mem_op("AveragePool", &onnx_parser::parse_pooling);
        add_mem_op("GlobalMaxPool", &onnx_parser::parse_pooling);
        add_mem_op("GlobalAveragePool", &onnx_parser::parse_pooling);
        add_mem_op("Reshape", &onnx_parser::parse_reshape);
        add_mem_op("Flatten", &onnx_parser::parse_flatten);
        add_mem_op("Gemm", &onnx_parser::parse_gemm);
        add_mem_op("BatchNormalization", &onnx_parser::parse_batchnorm);
        add_mem_op("Softmax", &onnx_parser::parse_softmax);
        add_mem_op("LogSoftmax", &onnx_parser::parse_logsoftmax);
        add_mem_op("Squeeze", &onnx_parser::parse_squeeze);
        add_mem_op("Unsqueeze", &onnx_parser::parse_unsqueeze);
        add_mem_op("Slice", &onnx_parser::parse_slice);
        add_mem_op("Concat", &onnx_parser::parse_concat);
        add_mem_op("Gather", &onnx_parser::parse_gather);
        add_mem_op("Shape", &onnx_parser::parse_shape);
        add_mem_op("ConstantFill", &onnx_parser::parse_constant_fill);
        add_mem_op("Transpose", &onnx_parser::parse_transpose);
        add_mem_op("RNN", &onnx_parser::parse_rnn);
        add_mem_op("GRU", &onnx_parser::parse_gru);
        add_mem_op("LSTM", &onnx_parser::parse_lstm);
        add_mem_op("Pad", &onnx_parser::parse_pad);

        // init the activation function map
        init_actv_func();
    }

    void init_actv_func()
    {
        map_actv_funcs.insert(std::make_pair("tanh", op::tanh{}));
        map_actv_funcs.insert(std::make_pair("relu", op::relu{}));
        map_actv_funcs.insert(std::make_pair("sigmoid", op::sigmoid{}));
        map_actv_funcs.insert(std::make_pair("leakyrelu", op::leaky_relu{}));
        map_actv_funcs.insert(std::make_pair("elu", op::elu{}));
    }

    template <class F>
    void add_op(std::string name, F f)
    {
        ops.emplace(name, [=](auto&&... xs) {
            return std::vector<instruction_ref>{f(std::forward<decltype(xs)>(xs)...)};
        });
    }

    // Multi output op
    template <class F>
    void add_multi_op(std::string name, F f)
    {
        ops.emplace(name, f);
    }

    template <class F>
    void add_mem_op(std::string name, F f)
    {
        add_op(name, [=](auto&&... xs) {
            return std::mem_fn(f)(*this, name, std::forward<decltype(xs)>(xs)...);
        });
    }

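    // Registers a binary operator. The legacy pre-opset-7 "broadcast"/"axis"
    // attributes are honored when present; otherwise numpy-style
    // multidirectional broadcasting is applied.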
    template <class T>
    void add_binary_op(std::string name, T x)
    {
        add_op(name, [this, x](attribute_map attributes, std::vector<instruction_ref> args) {
            if(args.size() != 2)
                MIGRAPHX_THROW("binary operators should have 2 operands");
            if(contains(attributes, "broadcast") and contains(attributes, "axis"))
            {
                uint64_t broadcasted = parse_value(attributes.at("broadcast")).at<uint64_t>();
                if(broadcasted != 0)
                {
                    uint64_t axis = parse_value(attributes.at("axis")).at<uint64_t>();
                    auto l =
                        prog.add_instruction(op::broadcast{axis, args[0]->get_shape()}, args[1]);
                    return prog.add_instruction(x, args[0], l);
                }
                return prog.add_instruction(x, args);
            }
            else
            {
                return add_broadcastable_binary_op(args[0], args[1], x);
            }
        });
    }

    template <class T>
    instruction_ref add_broadcastable_binary_op(instruction_ref arg0, instruction_ref arg1, T x)
    {
        if(arg0->get_shape() != arg1->get_shape())
        {
            // Example:
            // s0 = (3,2,4,5) and s1 = (2,1,1)
            //
            // In this case we need to broadcast (:,1,1) portion of
            // s1 plus broadcast the 1st dimension of s1
            // giving output_lens = (3,2,4,5)
            //
            // Another example:
            // s0 = (3,2,1,5) and s1 = (2,7,5)
            // In this case we need to broadcast the (:,:,1:,:) axis
            // of s0 plus the 1st dimension of s1 giving
            // output_lens = (3,2,7,5)
            //
            // Get lengths for both arguments
            const std::vector<std::size_t>* s0 = &arg0->get_shape().lens();
            const std::vector<std::size_t>* s1 = &arg1->get_shape().lens();

            // Make sure s0 is the smaller size
            if(s0->size() > s1->size())
                std::swap(s0, s1);

            std::vector<std::size_t> output_lens(*s1);
            auto offset = s1->size() - s0->size();
            std::transform(s0->begin(),
                           s0->end(),
                           s1->begin() + offset,
                           output_lens.begin() + offset,
                           [](auto a, auto b) { return std::max(a, b); });

            auto l0 = prog.add_instruction(op::multibroadcast{output_lens}, arg0);
            auto l1 = prog.add_instruction(op::multibroadcast{output_lens}, arg1);
            return prog.add_instruction(x, l0, l1);
        }
        else
        {
            return prog.add_instruction(x, {arg0, arg1});
        }
    }

    template <class T>
    void add_generic_op(std::string name, T x)
    {
        add_op(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
            return prog.add_instruction(x, args);
        });
    }

    template <class T>
    void add_variadic_op(std::string name, T x)
    {
        add_op(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
            return std::accumulate(std::next(args.begin()),
                                   args.end(),
                                   args.front(),
                                   [this, x](instruction_ref a, instruction_ref b) {
                                       return add_broadcastable_binary_op(a, b, x);
                                   });
        });
    }

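    // Softmax: the input is assumed to be 2-d here; it is wrapped into a
    // 4-d {n, c, 1, 1} shape for the softmax operator and reshaped back
    // afterwards.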
    instruction_ref
    parse_softmax(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
        auto dims = args.front()->get_shape().lens();
        auto r =
            prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1]), 1, 1}}, args.front());
        auto s = prog.add_instruction(op::softmax{}, r);
        return prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1])}}, s);
    }

    instruction_ref parse_logsoftmax(const std::string&,
                                     const attribute_map& attributes,
                                     std::vector<instruction_ref> args)
    {
        int axis = 1;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }

        return prog.add_instruction(op::logsoftmax{axis}, std::move(args));
    }

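    // Conv: asymmetric "pads" values cannot be represented on the
    // convolution operator itself, so in that case an explicit pad
    // instruction is inserted in front of the convolution. A third
    // argument, if present, is the bias broadcast over the channel axis.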
    instruction_ref
    parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::convolution op;
        auto l0 = args[0];
        if(contains(attributes, "pads"))
        {
            if(contains(attributes, "auto_pad"))
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }
            std::vector<std::int64_t> padding;
            copy(attributes["pads"].ints(), std::back_inserter(padding));
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                // insert zeros for pad op (args[0] has 4 dims)
                padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
                l0      = prog.add_instruction(op::pad{padding}, l0);
            }
            else
            {
                op.padding[0] = padding[0];
                op.padding[1] = padding[1];
            }
        }
        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
        }
        if(contains(attributes, "dilations"))
        {
            copy(attributes["dilations"].ints(), op.dilation.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(contains(attributes, "pads") and to_upper(s) != "NOTSET")
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }

            if(s.find("SAME") != std::string::npos)
            {
                op.padding_mode = op::padding_mode_t::same;
            }
        }
        if(contains(attributes, "group"))
        {
            op.group = parse_value(attributes.at("group")).at<int>();
        }
        if(args.size() == 3)
        {
            uint64_t axis = 1;
            auto l1       = prog.add_instruction(op, l0, args[1]);
            auto l2       = prog.add_instruction(op::broadcast{axis, l1->get_shape()}, args[2]);
            return prog.add_instruction(op::add{}, l1, l2);
        }
        return prog.add_instruction(op, l0, args[1]);
    }

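    // Pooling: handles MaxPool/AveragePool and their Global variants; the
    // Global forms pool over the whole spatial extent by setting the window
    // lengths to the input's H and W (a 4-d NCHW input is assumed).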
    instruction_ref parse_pooling(const std::string& name,
                                  attribute_map attributes,
                                  std::vector<instruction_ref> args)
    {
        op::pooling op{ends_with(name, "MaxPool") ? "max" : "average"};
        auto l0 = args[0];
        if(starts_with(name, "Global"))
        {
            auto lens  = args.front()->get_shape().lens();
            op.lengths = {lens[2], lens[3]};
        }
        if(contains(attributes, "pads"))
        {
            std::vector<std::int64_t> padding;
            copy(attributes["pads"].ints(), std::back_inserter(padding));
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                // insert zeros for pad op (args[0] has 4 dims)
                padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
                l0      = prog.add_instruction(op::pad{padding}, l0);
            }
            else
            {
                op.padding[0] = padding[0];
                op.padding[1] = padding[1];
            }
        }
        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
        }
        if(contains(attributes, "kernel_shape"))
        {
            copy(attributes["kernel_shape"].ints(), op.lengths.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(s.find("SAME_UPPER") == std::string::npos)
            {
                MIGRAPHX_THROW("auto_pad only supports SAME_UPPER for pooling");
            }
            op.padding_mode = op::padding_mode_t::same;
        }

        return prog.add_instruction(op, l0);
    }

    instruction_ref
    parse_reshape(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::reshape op;
        if(args.size() == 1)
        {
            literal s = parse_value(attributes.at("shape"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
        }
        if(args.size() == 2)
        {
            literal s = args[1]->get_literal();
            s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
        }
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_flatten(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        uint64_t axis = 1;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }
        return prog.add_instruction(op::flatten{axis}, args[0]);
    }

    instruction_ref
    parse_squeeze(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::squeeze op;
        literal s = parse_value(attributes.at("axes"));
        s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_unsqueeze(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::unsqueeze op;
        literal s = parse_value(attributes.at("axes"));
        s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_concat(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::size_t axis = parse_value(attributes.at("axis")).at<int>();
        op::concat op{axis};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref
    parse_gather(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        int axis = 0;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }
        op::gather op{axis};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref
    parse_slice(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::slice op;
        if(contains(attributes, "axes"))
        {
            literal s = parse_value(attributes.at("axes"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        }
        {
            literal s = parse_value(attributes.at("ends"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.ends)); });
        }
        {
            literal s = parse_value(attributes.at("starts"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.starts)); });
        }
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref parse_constant(const std::string&,
                                   attribute_map attributes,
                                   const std::vector<instruction_ref>&)
    {
        literal v     = parse_value(attributes.at("value"));
        auto dim_size = attributes.at("value").t().dims_size();
        // if dim_size is 0, it is a scalar
        if(dim_size == 0)
        {
            migraphx::shape scalar_shape{v.get_shape().type()};
            return prog.add_literal(migraphx::literal{scalar_shape, v.data()});
        }

        return prog.add_literal(v);
    }

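    // Gemm: computes alpha * op(A) * op(B) + beta * C, where transA/transB
    // select a transpose of the last two dimensions of the corresponding
    // input.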
    instruction_ref
    parse_gemm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 1.0f;
        float beta  = 1.0f;
        bool transa = false;
        bool transb = false;
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        if(contains(attributes, "beta"))
        {
            beta = parse_value(attributes.at("beta")).at<float>();
        }
        if(contains(attributes, "transA"))
        {
            transa = parse_value(attributes.at("transA")).at<bool>();
        }
        if(contains(attributes, "transB"))
        {
            transb = parse_value(attributes.at("transB")).at<bool>();
        }

        std::vector<int64_t> perm(args[0]->get_shape().lens().size());
        std::iota(perm.begin(), perm.end(), int64_t{0});
        // swap the last two elements
        std::swap(*perm.rbegin(), *(perm.rbegin() + 1));

        auto l1 = (transa) ? prog.add_instruction(op::transpose{perm}, args[0]) : args[0];
        auto l2 = (transb) ? prog.add_instruction(op::transpose{perm}, args[1]) : args[1];
        if(args.size() == 3)
        {
            if(beta != 0.f)
            {
                auto l3 = prog.add_instruction(op::dot{alpha}, l1, l2);
                auto l4 = args[2];
                if(l4->get_shape().scalar()) // ignore args[2] (no C value added to alpha*A*B)
                    return l3;
                if(beta != 1.f)
                {
                    auto beta_val = prog.add_literal(beta);
                    auto l5 = prog.add_instruction(op::scalar{args[2]->get_shape()}, beta_val);
                    l4      = prog.add_instruction(op::mul{}, args[2], l5);
                }
                return add_broadcastable_binary_op(l3, l4, op::add{});
            }
        }
        return prog.add_instruction(op::dot{alpha, beta}, l1, l2);
    }

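    // BatchNormalization: only the inference form is generated; the is_test
    // attribute is parsed but intentionally unused.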
    instruction_ref
    parse_batchnorm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float epsilon                                     = 1e-5f;
        float momentum                                    = 0.9f;
        op::batch_norm_inference::bn_infer_mode_t bn_mode = op::batch_norm_inference::spatial;
        bool is_test                                      = false;
        if(contains(attributes, "epsilon"))
        {
            epsilon = parse_value(attributes.at("epsilon")).at<float>();
        }
        if(contains(attributes, "momentum"))
        {
            momentum = parse_value(attributes.at("momentum")).at<float>();
        }
        if(contains(attributes, "is_test"))
        {
            is_test = parse_value(attributes.at("is_test")).at<uint64_t>() > 0;
        }
        if(contains(attributes, "spatial"))
        {
            bn_mode = (parse_value(attributes.at("spatial")).at<uint64_t>() > 0)
                          ? op::batch_norm_inference::spatial
                          : op::batch_norm_inference::per_activation;
        }
        (void)is_test;
        op::batch_norm_inference op{epsilon, momentum, bn_mode};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref parse_leaky_relu(const std::string&,
                                     attribute_map attributes,
                                     std::vector<instruction_ref> args)
    {
        float alpha = 0.01; // default alpha val for leaky relu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::leaky_relu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    instruction_ref
    parse_elu(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 1.0; // default alpha val for elu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::elu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    instruction_ref
    parse_lrn(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 0.0001;
        float beta  = 0.75;
        float bias  = 1.0;
        int size    = 1;
        if(contains(attributes, "alpha"))
            alpha = parse_value(attributes.at("alpha")).at<float>();
        if(contains(attributes, "beta"))
            beta = parse_value(attributes.at("beta")).at<float>();
        if(contains(attributes, "bias"))
            bias = parse_value(attributes.at("bias")).at<float>();
        if(contains(attributes, "size"))
            size = parse_value(attributes.at("size")).at<int>();
        op::lrn op{alpha, beta, bias, size};
        return prog.add_instruction(op, args.front());
    }

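    // ImageScaler: computes scale * input + bias, with the per-channel bias
    // broadcast along axis 1 (the channel dimension).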
    instruction_ref parse_imagescaler(const std::string&,
                                      attribute_map attributes,
                                      std::vector<instruction_ref> args)
    {
        float scale = 1.0;
        std::vector<float> bias{};
        if(contains(attributes, "scale"))
        {
            scale = parse_value(attributes.at("scale")).at<float>();
        }

        if(contains(attributes, "bias"))
        {
            auto&& bias_floats = attributes["bias"].floats();
            bias               = std::vector<float>(bias_floats.begin(), bias_floats.end());
        }
        auto input_shape = args.front()->get_shape();

        auto scale_val = prog.add_literal(scale);
        auto bias_vals = prog.add_literal(
            migraphx::literal{migraphx::shape{migraphx::shape::float_type, {bias.size()}}, bias});

        auto scale_tensor = prog.add_instruction(migraphx::op::scalar{input_shape}, scale_val);
        auto img_scaled   = prog.add_instruction(migraphx::op::mul{}, args.front(), scale_tensor);
        auto bias_bcast = prog.add_instruction(migraphx::op::broadcast{1, input_shape}, bias_vals);
        return prog.add_instruction(migraphx::op::add{}, img_scaled, bias_bcast);
    }

    instruction_ref
    parse_transpose(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::vector<int64_t> perm{};
        if(contains(attributes, "perm"))
        {
            auto&& perm_vals = attributes["perm"].ints();
            perm             = std::vector<int64_t>(perm_vals.begin(), perm_vals.end());
        }
        return prog.add_instruction(migraphx::op::transpose{perm}, args.front());
    }

    instruction_ref
    parse_pad(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::vector<int64_t> pads{};
        float value = 0.0f;
        if(contains(attributes, "pads"))
        {
            auto&& pad_vals = attributes["pads"].ints();
            pads            = std::vector<int64_t>(pad_vals.begin(), pad_vals.end());
        }
        if(contains(attributes, "value"))
        {
            value = parse_value(attributes.at("value")).at<float>();
        }
        if(contains(attributes, "mode"))
        {
            auto mode = attributes.at("mode").s();
            if(mode != "constant")
                MIGRAPHX_THROW("migraphx currently only supports constant padding");
        }
        return prog.add_instruction(migraphx::op::pad{pads, value}, args.front());
    }
    // Use a literal instruction to replace the Shape operator, since the
    // output of the shape operator is a literal in migraphx
    instruction_ref
    parse_shape(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
        if(args.size() != 1)
            MIGRAPHX_THROW("Shape: operator should have 1 operand");
        std::vector<std::size_t> arg_shape = args[0]->get_shape().lens();
        std::vector<int64_t> vec_shape(arg_shape.size());
        migraphx::shape s(migraphx::shape::int64_type, {arg_shape.size()});
        std::transform(arg_shape.begin(), arg_shape.end(), vec_shape.begin(), [](auto i) {
            return int64_t(i);
        });
        return prog.add_literal(migraphx::literal{s, vec_shape});
    }

    // Use a literal instruction to replace the ConstantFill operator. In RNN, the input shape
    // and value are fixed, so there is no need to do the actual computation for the
    // ConstantFill operator
    instruction_ref parse_constant_fill(const std::string&,
                                        attribute_map attributes,
                                        std::vector<instruction_ref> args)
    {
        int input_as_shape = 0;
        int dtype          = 1;
        float value        = 0.0f;

        if(contains(attributes, "dtype"))
        {
            dtype = parse_value(attributes.at("dtype")).at<int>();
        }
        migraphx::shape::type_t type = get_type(dtype);

        if(contains(attributes, "input_as_shape"))
        {
            input_as_shape = parse_value(attributes.at("input_as_shape")).at<int>();
        }

        if(contains(attributes, "value"))
        {
            value = parse_value(attributes.at("value")).at<float>();
        }

        if(contains(attributes, "extra_shape"))
        {
            MIGRAPHX_THROW("ConstantFill: cannot handle extra shape attribute");
        }

        if(input_as_shape == 1)
        {
            if(args.size() != 1)
            {
                MIGRAPHX_THROW("ConstantFill: need an input argument as output shape");
            }

            if(contains(attributes, "shape"))
            {
                MIGRAPHX_THROW("ConstantFill: cannot set the shape argument and pass in an input "
                               "at the same time");
            }

            migraphx::argument in = args[0]->eval();
            if(in.empty())
            {
                MIGRAPHX_THROW("ConstantFill: cannot handle dynamic shape as input");
            }

            std::vector<std::size_t> dims;
            in.visit([&](auto input) { dims.assign(input.begin(), input.end()); });
            migraphx::shape s(type, dims);
            std::vector<float> values(s.elements(), value);
            return prog.add_literal(migraphx::literal(s, values));
        }
        else if(input_as_shape == 0)
        {
            if(!contains(attributes, "shape"))
            {
                MIGRAPHX_THROW("ConstantFill: attribute output shape is needed");
            }

            literal ls = parse_value(attributes.at("shape"));
            std::vector<std::size_t> dims;
            ls.visit([&](auto s) { dims.assign(s.begin(), s.end()); });
            migraphx::shape s{type, dims};
            std::vector<float> values(s.elements(), value);
            return prog.add_literal(migraphx::literal(s, values));
        }
        else
        {
            MIGRAPHX_THROW("ConstantFill: wrong value of attribute input_as_shape");
        }
    }

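    // RNN: produces two results, mirroring ONNX's Y (the concatenated
    // hidden states of every time step) and Y_h (the last hidden state).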
    std::vector<instruction_ref>
    parse_rnn(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        migraphx::shape input_shape = args[0]->get_shape();
        std::size_t hidden_size     = args[1]->get_shape().lens()[1];

        if(contains(attributes, "hidden_size"))
        {
            std::size_t hidden_size_att = parse_value(attributes.at("hidden_size")).at<int>();
            if(hidden_size != hidden_size_att)
            {
                MIGRAPHX_THROW("RNN: hidden size mismatch in input and attribute");
            }
        }

        // Handling of direction to be added later
        std::string direction{"forward"};
        if(contains(attributes, "direction"))
        {
            direction = attributes.at("direction").s();
        }

        op::rnn_direction dirct = op::rnn_direction::forward;
        if(direction == "bidirectional")
        {
            dirct = op::rnn_direction::bidirectional;
        }
        else if(direction == "reverse")
        {
            dirct = op::rnn_direction::reverse;
        }

        std::vector<std::string> vec_names{"tanh"};
        if(contains(attributes, "activations"))
        {
            auto names = attributes.at("activations").strings();
            vec_names.clear();
            vec_names.resize(names.size());
            std::copy(names.begin(), names.end(), vec_names.begin());
        }

        auto name_it = std::find_if(vec_names.begin(), vec_names.end(), [&](auto& name) {
            return (map_actv_funcs.count(name) == 0);
        });
        if(name_it != vec_names.end())
        {
            MIGRAPHX_THROW("RNN: activation function " + std::string(*name_it) + " not supported");
        }

        // bidirectional case should have two activation functions.
        // one is for forward, and the other is for reverse.
        // if only one actv function is provided, we use it in both
        // forward and reverse direction
        if(dirct == op::rnn_direction::bidirectional)
        {
            if(vec_names.size() == 1)
            {
                vec_names.push_back(vec_names.at(0));
            }
        }

        std::vector<operation> vec_actv_funcs(vec_names.size());
        std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& fn) {
            return map_actv_funcs[fn];
        });

        // To be added later
        float clip = 0.0;
        if(contains(attributes, "clip"))
        {
            clip = parse_value(attributes.at("clip")).at<float>();
        }

        // if the number of arguments is less than 6, append
        // undefined operator to have 6 arguments
        if(args.size() < 6)
        {
            auto ins = prog.add_instruction(op::undefined{});
            args.insert(args.end(), (6 - args.size()), ins);
        }

        // first output for the concatenation of hidden states
        auto hidden_states = prog.add_instruction(op::rnn{hidden_size, vec_actv_funcs, dirct, clip},
                                                  std::move(args));

        // second output for the last hidden state
        auto last_output = prog.add_instruction(op::rnn_last_output{}, hidden_states);

        return {hidden_states, last_output};
    }

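    // GRU: same two-output pattern as parse_rnn, plus GRU-specific
    // attributes such as linear_before_reset.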
    std::vector<instruction_ref>
    parse_gru(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        migraphx::shape input_shape = args[0]->get_shape();
        std::size_t hidden_size     = args[2]->get_shape().lens()[2];

        if(contains(attributes, "hidden_size"))
        {
            std::size_t hidden_size_att = parse_value(attributes.at("hidden_size")).at<int>();
            if(hidden_size != hidden_size_att)
            {
                MIGRAPHX_THROW("GRU: hidden size mismatch in input and attribute");
            }
        }

        // Handling of direction to be added later
        std::string direction{"forward"};
        if(contains(attributes, "direction"))
        {
            direction = attributes.at("direction").s();
        }

        op::rnn_direction dirct = op::rnn_direction::forward;
        if(direction == "bidirectional")
        {
            dirct = op::rnn_direction::bidirectional;
        }
        else if(direction == "reverse")
        {
            dirct = op::rnn_direction::reverse;
        }

        std::vector<std::string> vec_names = {"sigmoid", "tanh"};
        if(contains(attributes, "activations"))
        {
            auto names = attributes.at("activations").strings();
            vec_names.clear();
            vec_names.resize(names.size());
            std::copy(names.begin(), names.end(), vec_names.begin());
        }

        // need 4 activation functions
        if(dirct == op::rnn_direction::bidirectional)
        {
            // 4 activation functions are used in the bidirectional
            // scenario. No spec is provided in onnx::operator. We
            // use the algorithm that: if 1 actv function is provided,
            // repeat it four times. If 2 actv functions are provided,
            // assume forward and reverse use the same pair of actv
            // functions. For the case of 3 actv functions provided,
            // assume the 3rd one is repeated once and used by the
            // reverse direction.
            // This may need to change later
            if(vec_names.size() == 1)
            {
                vec_names.insert(vec_names.end(), 3, vec_names.at(0));
            }
            else if(vec_names.size() == 2)
            {
                // repeat the activation functions
                vec_names.push_back(vec_names.at(0));
                vec_names.push_back(vec_names.at(1));
            }
            else if(vec_names.size() == 3)
            {
                vec_names.push_back(vec_names.at(2));
            }
        }
        else
        {
            if(vec_names.size() == 1)
            {
                vec_names.push_back(vec_names.at(0));
            }
        }

        auto name_it = std::find_if(vec_names.begin(), vec_names.end(), [&](auto& name) {
            return (map_actv_funcs.count(name) == 0);
        });
        if(name_it != vec_names.end())
        {
            MIGRAPHX_THROW("GRU: activation function " + std::string(*name_it) + " not supported");
        }

        std::vector<operation> vec_actv_funcs(vec_names.size());
        std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& name) {
            return map_actv_funcs[name];
        });

        float clip = 0.0;
        if(contains(attributes, "clip"))
        {
            clip = parse_value(attributes.at("clip")).at<float>();
        }

        int linear_before_reset = 0;
        if(contains(attributes, "linear_before_reset"))
        {
            linear_before_reset = parse_value(attributes.at("linear_before_reset")).at<int>();
        }

        // append undefined operators to make 6 arguments
        if(args.size() < 6)
        {
            auto ins = prog.add_instruction(op::undefined{});
            args.insert(args.end(), 6 - args.size(), ins);
        }

        // first output for concatenation of hidden states
        auto hidden_states = prog.add_instruction(
            op::gru{hidden_size, vec_actv_funcs, dirct, clip, linear_before_reset},
            std::move(args));

        // second output for last gru output
        auto last_output = prog.add_instruction(op::rnn_last_output{}, hidden_states);

        return {hidden_states, last_output};
    }

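    // LSTM: produces three results (hidden-state sequence, last hidden
    // state, last cell state). ONNX LSTM takes up to 8 inputs, so missing
    // trailing inputs are padded with undefined instructions.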
    std::vector<instruction_ref>
    parse_lstm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        migraphx::shape input_shape = args[0]->get_shape();
        std::size_t hidden_size     = args[2]->get_shape().lens()[2];

        if(contains(attributes, "hidden_size"))
        {
            std::size_t hidden_size_att = parse_value(attributes.at("hidden_size")).at<int>();
            if(hidden_size != hidden_size_att)
            {
                MIGRAPHX_THROW("LSTM: hidden size mismatch in input and attribute");
            }
        }

        // Handling of direction to be added later
        std::string direction{"forward"};
        if(contains(attributes, "direction"))
        {
            direction = attributes.at("direction").s();
        }

        op::rnn_direction dirct = op::rnn_direction::forward;
        if(direction == "bidirectional")
        {
            dirct = op::rnn_direction::bidirectional;
        }
        else if(direction == "reverse")
        {
            dirct = op::rnn_direction::reverse;
        }
        else if(direction == "forward")
        {
            dirct = op::rnn_direction::forward;
        }
        else
        {
            MIGRAPHX_THROW("LSTM: incorrect direction attribute");
        }

        std::vector<std::string> vec_names = {"sigmoid", "tanh", "tanh"};
        if(contains(attributes, "activations"))
        {
            auto names = attributes.at("activations").strings();
            vec_names.clear();
            vec_names.resize(names.size());
            std::copy(names.begin(), names.end(), vec_names.begin());
        }

        // need 6 activation functions for the bidirectional case
        if(dirct == op::rnn_direction::bidirectional)
        {
            // 6 activation functions are used in the bidirectional
            // scenario. No spec is provided in onnx::operator. We
            // use the algorithm that: if 1 actv function is provided,
            // repeat the 1st six times. If 2 actv functions are provided,
            // repeat the 2nd once, then repeat all three once.
            // If 3 actv funcs are provided, repeat all three once.
            // The same algorithm is used when 4, 5, or 6 actv functions
            // are provided. This may need to change later
            switch(vec_names.size())
            {
            case 1:
                vec_names = {vec_names.at(0),
                             vec_names.at(0),
                             vec_names.at(0),
                             vec_names.at(0),
                             vec_names.at(0),
                             vec_names.at(0)};
                break;

            case 2:
                // repeat the 2nd actv func once, then repeat all three another time
                vec_names = {vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(1),
                             vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(1)};
                break;

            case 3:
                // repeat all three actv funcs once
                vec_names = {vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(2),
                             vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(2)};
                break;

            case 4:
                vec_names = {vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(2),
                             vec_names.at(3),
                             vec_names.at(3),
                             vec_names.at(3)};
                break;

            case 5:
                vec_names = {vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(2),
                             vec_names.at(3),
                             vec_names.at(4),
                             vec_names.at(4)};
                break;

            default: break;
            }
        }
        else
        {
            switch(vec_names.size())
            {
            case 1: vec_names = {vec_names.at(0), vec_names.at(0), vec_names.at(0)}; break;

            case 2:
                // repeat the 2nd actv func once, so we have 3 actv funcs
                vec_names = {vec_names.at(0), vec_names.at(1), vec_names.at(1)};
                break;

            default: break;
            }
        }

        auto name_it = std::find_if(vec_names.begin(), vec_names.end(), [&](auto& name) {
            return (map_actv_funcs.count(name) == 0);
        });
        if(name_it != vec_names.end())
        {
            MIGRAPHX_THROW("LSTM: activation function " + std::string(*name_it) + " not supported");
        }

        std::vector<operation> vec_actv_funcs(vec_names.size());
        std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& name) {
            return map_actv_funcs[name];
        });

        float clip = 0.0;
        if(contains(attributes, "clip"))
        {
            clip = parse_value(attributes.at("clip")).at<float>();
        }

        int input_forget = 0;
        if(contains(attributes, "input_forget"))
        {
            input_forget = parse_value(attributes.at("input_forget")).at<int>();
        }

        // append undefined operators to make 8 arguments
        if(args.size() < 8)
        {
            auto ins = prog.add_instruction(op::undefined{});
            args.insert(args.end(), 8 - args.size(), ins);
        }

        // first output for concatenation of hidden states
        auto hidden_states = prog.add_instruction(
            op::lstm{hidden_size, vec_actv_funcs, dirct, clip, input_forget}, std::move(args));

        // second output for last lstm output
        auto last_output = prog.add_instruction(op::rnn_last_output{}, hidden_states);

        // third output for last cell output
        auto last_cell_output = prog.add_instruction(op::lstm_last_cell_output{}, hidden_states);

        return {hidden_states, last_output, last_cell_output};
    }

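    // Entry point: deserializes an onnx::ModelProto from the stream and, if
    // a graph is present, translates it into this->prog. A minimal driver
    // might look like this (a sketch; the model file name is hypothetical):
    //
    //   std::ifstream file("model.onnx", std::ios::binary);
    //   onnx_parser parser;
    //   parser.parse_from(file);
    //   migraphx::program p = std::move(parser.prog);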
    void parse_from(std::istream& is)
    {
        onnx::ModelProto model;
        if(model.ParseFromIstream(&is))
        {
            if(model.has_graph())
            {
                this->parse_graph(model.graph());
            }
        }
        else
        {
            MIGRAPHX_THROW("Failed reading onnx file.");
        }
    }

    void parse_graph(const onnx::GraphProto& graph)
    {
        nodes = get_nodes(graph);
        std::unordered_map<std::string, onnx::TensorProto> initializer_data;
        for(auto&& f : graph.initializer())
        {
            initializer_data[f.name()] = f;
        }
        for(auto&& input : graph.input())
        {
            const std::string& name = input.name();
            // Does the input have an initializer?
            if(contains(initializer_data, name))
            {
                auto t             = initializer_data[name];
                instructions[name] = prog.add_literal(parse_tensor(t));
            }
            else
            {
                // TODO: Get shape of input parameter
                shape s            = parse_type(input.type());
                instructions[name] = prog.add_parameter(name, s);
            }
        }
        for(auto&& p : nodes)
        {
            this->parse_node(p.first);
        }
    }

    void parse_undefined(const std::string& name)
    {
        auto ins           = prog.add_instruction(op::undefined{});
        instructions[name] = ins;
    }

    void parse_node(const std::string& name)
    {
        if(name.empty())
            MIGRAPHX_THROW("Onnx node must have a name");
        if(instructions.count(name) == 0)
        {
            auto&& node = nodes.at(name);
            std::vector<instruction_ref> args;
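            // visit all inputs first so instructions are added in topological order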
            for(auto&& input : node.input())
            {
                if(nodes.count(input) > 0)
                {
                    assert(name != input);
                    this->parse_node(input);
                }
                else if(input.empty())
                {
                    this->parse_undefined(input);
                }
                args.push_back(instructions.at(input));
            }
            std::vector<instruction_ref> result;
            if(ops.count(node.op_type()) == 0)
            {
                result.push_back(prog.add_instruction(unknown{node.op_type()}, args));
            }
            else
            {
                result = ops[node.op_type()](get_attributes(node), args);
            }
            // Even nodes with no outputs produce an output in migraphx
            if(node.output().empty() and result.size() == 1)
            {
                instructions[name] = result.front();
            }
            else
            {
                assert(node.output().size() >= result.size());
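                // pair each result instruction with its output name so that
                // downstream nodes can look it up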
                std::transform(result.begin(),
                               result.end(),
                               node.output().begin(),
                               std::inserter(instructions, instructions.end()),
                               [](auto&& x, auto&& y) { return std::make_pair(y, x); });
            }
        }
    }

    static attribute_map get_attributes(const onnx::NodeProto& node)
    {
        std::unordered_map<std::string, onnx::AttributeProto> result;
        for(auto&& attr : node.attribute())
        {
            result[attr.name()] = attr;
        }
        return result;
    }

    static node_map get_nodes(const onnx::GraphProto& graph)
    {
        std::unordered_map<std::string, onnx::NodeProto> result;
        std::size_t n = 0;
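        // index each node by its output names; nodes without outputs are keyed
        // by their (possibly generated) node name instead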
        for(auto&& node : graph.node())
        {
            if(node.output().empty())
            {
                if(node.name().empty())
                {
                    result["migraphx_unamed_node_" + std::to_string(n)] = node;
                    n++;
                }
                else
                {
                    result[node.name()] = node;
                }
            }
            for(auto&& output : node.output())
            {
                result[output] = node;
            }
        }
        return result;
    }

    template <class T>
    static literal from_repeated(shape::type_t t, const T& r)
    {
        std::size_t size = r.size();
        return literal{{t, {size}}, r.begin(), r.end()};
    }
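    // Example: from_repeated(shape::int64_type, attr.ints()) yields a
    // one-dimensional int64 literal with one element per repeated value.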

    static literal parse_value(const onnx::AttributeProto& attr)
    {
        switch(attr.type())
        {
        case onnx::AttributeProto::UNDEFINED: return {};
        case onnx::AttributeProto::FLOAT: return literal{attr.f()};
        case onnx::AttributeProto::INT: return literal{attr.i()};
        case onnx::AttributeProto::STRING: return {};
        case onnx::AttributeProto::TENSOR: return parse_tensor(attr.t());
        case onnx::AttributeProto::GRAPH: return {};
        case onnx::AttributeProto::FLOATS: return from_repeated(shape::float_type, attr.floats());
        case onnx::AttributeProto::INTS: return from_repeated(shape::int64_type, attr.ints());
        case onnx::AttributeProto::STRINGS: return {};
        case onnx::AttributeProto::TENSORS: return {};
        case onnx::AttributeProto::GRAPHS: return {};
        }
        MIGRAPHX_THROW("Invalid attribute type");
    }

    static literal parse_tensor(const onnx::TensorProto& t)
    {
        std::vector<std::size_t> dims(t.dims().begin(), t.dims().end());
        // in case of scalar constants in the onnx file, use dims = {1} to fill the initializer data
        if(dims.empty())
        {
            dims = {1};
        }
        if(t.has_raw_data())
        {
            const std::string& s = t.raw_data();
            switch(t.data_type())
            {
            case onnx::TensorProto::UNDEFINED: throw std::runtime_error("");
            case onnx::TensorProto::FLOAT: return literal{{shape::float_type, dims}, s.data()};
            case onnx::TensorProto::UINT8: throw std::runtime_error("");
            case onnx::TensorProto::INT8: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::UINT16: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT16: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT32: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT64: return literal{{shape::int64_type, dims}, s.data()};
            case onnx::TensorProto::STRING: throw std::runtime_error("");
            case onnx::TensorProto::BOOL: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::FLOAT16: return literal{{shape::half_type, dims}, s.data()};
            case onnx::TensorProto::DOUBLE: return literal{{shape::double_type, dims}, s.data()};
            case onnx::TensorProto::UINT32: throw std::runtime_error("");
            case onnx::TensorProto::UINT64: throw std::runtime_error("");
            case onnx::TensorProto::COMPLEX64: throw std::runtime_error("");
            case onnx::TensorProto::COMPLEX128: throw std::runtime_error("");
            }
            MIGRAPHX_THROW("Invalid tensor type");
        }
        switch(t.data_type())
        {
        case onnx::TensorProto::UNDEFINED: throw std::runtime_error("");
        case onnx::TensorProto::FLOAT:
            return literal{{shape::float_type, dims}, t.float_data().begin(), t.float_data().end()};
        case onnx::TensorProto::UINT8: throw std::runtime_error("");
        case onnx::TensorProto::INT8:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::UINT16:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT16:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT32:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT64:
            return literal{{shape::int64_type, dims}, t.int64_data().begin(), t.int64_data().end()};
        case onnx::TensorProto::STRING: throw std::runtime_error("");
        case onnx::TensorProto::BOOL:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::FLOAT16:
        {
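            // FLOAT16 tensors carry their 16-bit payloads in int32_data;
            // reinterpret each raw bit pattern as a half, with no numeric
            // conversion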
            std::vector<uint16_t> data_uint16(t.int32_data().begin(), t.int32_data().end());
            std::vector<half> data_half;
            std::transform(data_uint16.begin(),
                           data_uint16.end(),
                           std::back_inserter(data_half),
                           [](uint16_t raw_val) { return *reinterpret_cast<half*>(&raw_val); });
            return literal{{shape::half_type, dims}, data_half.begin(), data_half.end()};
        }
        case onnx::TensorProto::DOUBLE:
            return literal{
                {shape::double_type, dims}, t.double_data().begin(), t.double_data().end()};
        case onnx::TensorProto::UINT32: throw std::runtime_error("");
        case onnx::TensorProto::UINT64: throw std::runtime_error("");
        case onnx::TensorProto::COMPLEX64: throw std::runtime_error("");
        case onnx::TensorProto::COMPLEX128: throw std::runtime_error("");
        }
        MIGRAPHX_THROW("Invalid tensor type");
    }

    static shape parse_type(const onnx::TypeProto& t)
    {
        shape::type_t shape_type{};
        switch(t.tensor_type().elem_type())
        {
        case onnx::TensorProto::UNDEFINED:
            break; // throw std::runtime_error("Unsupported type UNDEFINED");
        case onnx::TensorProto::FLOAT: shape_type = shape::float_type; break;
        case onnx::TensorProto::UINT8:
            break; // throw std::runtime_error("Unsupported type UINT8");
        case onnx::TensorProto::INT8: shape_type = shape::int8_type; break;
        case onnx::TensorProto::UINT16: shape_type = shape::uint16_type; break;
        case onnx::TensorProto::INT16: shape_type = shape::int16_type; break;
        case onnx::TensorProto::INT32: shape_type = shape::int32_type; break;
        case onnx::TensorProto::INT64: shape_type = shape::int64_type; break;
        case onnx::TensorProto::STRING:
            break; // throw std::runtime_error("Unsupported type STRING");
        case onnx::TensorProto::BOOL:
            break; // throw std::runtime_error("Unsupported type BOOL");
        case onnx::TensorProto::FLOAT16: shape_type = shape::half_type; break;
        case onnx::TensorProto::DOUBLE: shape_type = shape::double_type; break;
        case onnx::TensorProto::UINT32: shape_type = shape::uint32_type; break;
        case onnx::TensorProto::UINT64: shape_type = shape::uint64_type; break;
        case onnx::TensorProto::COMPLEX64:
            break; // throw std::runtime_error("Unsupported type COMPLEX64");
        case onnx::TensorProto::COMPLEX128:
            break; // throw std::runtime_error("Unsupported type COMPLEX128");
        }
        std::vector<std::size_t> dims;
        auto&& tensor_dims = t.tensor_type().shape().dim();
        std::transform(tensor_dims.begin(),
                       tensor_dims.end(),
                       std::back_inserter(dims),
                       [](auto&& d) -> std::size_t {
                           if(not d.has_dim_value())
                           {
                               long default_batch_size = 1; // FIXME
                               return default_batch_size;
                           }
                           return d.dim_value();
                       });
        return {shape_type, dims};
    }

    shape::type_t get_type(int dtype)
    {
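        // dtype values mirror the onnx::TensorProto::DataType enum
        // (1 = FLOAT, 7 = INT64, 10 = FLOAT16, ...)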
        switch(dtype)
        {
        case 1: return shape::float_type;
        case 2: return shape::uint8_type;
        case 3: return shape::int8_type;
        case 4: return shape::uint16_type;
        case 5: return shape::int16_type;
        case 6: return shape::int32_type;
        case 7: return shape::int64_type;
        case 10: return shape::half_type;
        case 11: return shape::double_type;
        case 12: return shape::uint32_type;
        case 13: return shape::uint64_type;
        default:
        {
            MIGRAPHX_THROW("Prototensor data type " + std::to_string(dtype) + " not supported");
        }
        }
    }
};

program parse_onnx(const std::string& name)
{
    std::fstream input(name.c_str(), std::ios::in | std::ios::binary);
    onnx_parser parser;
#ifndef NDEBUG
    // Log the program when it can't be parsed
    try
    {
        parser.parse_from(input);
    }
    catch(...)
    {
        std::cerr << parser.prog << std::endl;
        throw;
    }
#else
    parser.parse_from(input);
#endif
    return std::move(parser.prog);
}
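// Example usage (a minimal sketch; "model.onnx" is a placeholder path):
//   migraphx::program p = migraphx::parse_onnx("model.onnx");
//   std::cout << p << std::endl;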

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx