"src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py" did not exist on "50e18ee6982f8006e7247c756c573b8204fe354b"
onnx.cpp 53.3 KB
Newer Older
Paul's avatar
Paul committed
1
2
3
4
5
6
7
8
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <onnx.pb.h>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <functional>
#include <array>
#include <algorithm>
#include <numeric>
#include <utility>
#include <vector>

#include <migraphx/fallthrough.hpp>
#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/config.hpp>
#include <migraphx/onnx.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

struct onnx_parser
{
    using attribute_map = std::unordered_map<std::string, onnx::AttributeProto>;
    using node_map      = std::unordered_map<std::string, onnx::NodeProto>;
    using op_func =
        std::function<std::vector<instruction_ref>(attribute_map, std::vector<instruction_ref>)>;

    node_map nodes;
    std::unordered_map<std::string, instruction_ref> instructions;
    program prog    = program();
    bool is_pytorch = false;

    std::unordered_map<std::string, op_func> ops;
    std::unordered_map<std::string, operation> map_actv_funcs;

    onnx_parser()
    {
        add_generic_op("MatMul", op::dot{});
        add_generic_op("Relu", op::relu{});
        add_generic_op("Sigmoid", op::sigmoid{});
        add_generic_op("Abs", op::abs{});
        add_generic_op("Exp", op::exp{});
        add_generic_op("Log", op::log{});
        // disable dropout for inference
        add_generic_op("Dropout", op::identity{});
        add_generic_op("Identity", op::identity{});
        add_generic_op("Sin", op::sin{});
        add_generic_op("Cos", op::cos{});
        add_generic_op("Tan", op::tan{});
        add_generic_op("Sinh", op::sinh{});
        add_generic_op("Cosh", op::cosh{});
        add_generic_op("Tanh", op::tanh{});
        add_generic_op("Asin", op::asin{});
        add_generic_op("Acos", op::acos{});
        add_generic_op("Atan", op::atan{});

        add_binary_op("Add", op::add{});
        add_binary_op("Div", op::div{});
        add_binary_op("Mul", op::mul{});
        add_binary_op("Sub", op::sub{});

        add_variadic_op("Sum", op::add{});
        add_variadic_op("Max", op::max{});
        add_variadic_op("Min", op::min{});

        add_mem_op("LRN", &onnx_parser::parse_lrn);
        add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
        add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
        add_mem_op("Elu", &onnx_parser::parse_elu);
        add_mem_op("Constant", &onnx_parser::parse_constant);
        add_mem_op("Conv", &onnx_parser::parse_conv);
        add_mem_op("MaxPool", &onnx_parser::parse_pooling);
        add_mem_op("AveragePool", &onnx_parser::parse_pooling);
        add_mem_op("GlobalMaxPool", &onnx_parser::parse_pooling);
        add_mem_op("GlobalAveragePool", &onnx_parser::parse_pooling);
        add_mem_op("Reshape", &onnx_parser::parse_reshape);
        add_mem_op("Flatten", &onnx_parser::parse_flatten);
        add_mem_op("Gemm", &onnx_parser::parse_gemm);
        add_mem_op("BatchNormalization", &onnx_parser::parse_batchnorm);
        add_mem_op("Softmax", &onnx_parser::parse_softmax);
        add_mem_op("LogSoftmax", &onnx_parser::parse_logsoftmax);
        add_mem_op("Squeeze", &onnx_parser::parse_squeeze);
        add_mem_op("Unsqueeze", &onnx_parser::parse_unsqueeze);
        add_mem_op("Slice", &onnx_parser::parse_slice);
        add_mem_op("Concat", &onnx_parser::parse_concat);
        add_mem_op("Gather", &onnx_parser::parse_gather);
        add_mem_op("Shape", &onnx_parser::parse_shape);
        add_mem_op("ConstantFill", &onnx_parser::parse_constant_fill);
        add_mem_op("Transpose", &onnx_parser::parse_transpose);
        add_mem_op("RNN", &onnx_parser::parse_rnn);
        add_mem_op("GRU", &onnx_parser::parse_gru);
        add_mem_op("LSTM", &onnx_parser::parse_lstm);
        add_mem_op("Pad", &onnx_parser::parse_pad);

        // init the activation function map
        init_actv_func();
    }

    void init_actv_func()
    {
        map_actv_funcs.insert(std::make_pair("tanh", op::tanh{}));
        map_actv_funcs.insert(std::make_pair("relu", op::relu{}));
        map_actv_funcs.insert(std::make_pair("sigmoid", op::sigmoid{}));
        map_actv_funcs.insert(std::make_pair("leakyrelu", op::leaky_relu{}));
        map_actv_funcs.insert(std::make_pair("elu", op::elu{}));
    }
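
    // Op handlers live in a single name -> std::function table (`ops`). add_op
    // wraps a handler so every entry in the table uniformly returns a
    // std::vector<instruction_ref>; add_multi_op registers a handler that
    // already returns the vector itself.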

    template <class F>
    void add_op(std::string name, F f)
    {
        ops.emplace(name, [=](auto&&... xs) {
            return std::vector<instruction_ref>{f(std::forward<decltype(xs)>(xs)...)};
        });
    }

    // Multi output op
    template <class F>
    void add_multi_op(std::string name, F f)
    {
        ops.emplace(name, f);
    }

    template <class F>
    void add_mem_op(std::string name, F f)
    {
        add_op(name, [=](auto&&... xs) {
            return std::mem_fn(f)(*this, name, std::forward<decltype(xs)>(xs)...);
        });
    }

    template <class T>
    void add_binary_op(std::string name, T x)
    {
        add_op(name, [this, x](attribute_map attributes, std::vector<instruction_ref> args) {
            if(args.size() != 2)
                MIGRAPHX_THROW("binary operators should have 2 operands");
            if(contains(attributes, "broadcast") and contains(attributes, "axis"))
            {
                uint64_t broadcasted = parse_value(attributes.at("broadcast")).at<uint64_t>();
                if(broadcasted != 0)
                {
                    uint64_t axis = parse_value(attributes.at("axis")).at<uint64_t>();
                    auto l =
                        prog.add_instruction(op::broadcast{axis, args[0]->get_shape()}, args[1]);
                    return prog.add_instruction(x, args[0], l);
                }
                return prog.add_instruction(x, args);
            }
            else
            {
                return add_broadcastable_binary_op(args[0], args[1], x);
            }
        });
    }

    template <class T>
    instruction_ref add_broadcastable_binary_op(instruction_ref arg0, instruction_ref arg1, T x)
    {
        if(arg0->get_shape() != arg1->get_shape())
        {
            // Example:
            // s0 = (3,2,4,5) and s1 = (2,1,1)
            //
            // In this case we need to broadcast (:,1,1) portion of
            // s1 plus broadcast the 1st dimension of s1
            // giving output_lens = (3,2,4,5)
            //
            // Another example:
            // s0 = (3,2,1,5) and s1 = (2,7,5)
            // In this case we need to broadcast the (:,:,1:,:) axis
            // of s0 plus the 1st dimension of s1 giving
            // output_lens = (3,2,7,5)
            //
            // Get lengths for both arguments
            const std::vector<std::size_t>* s0 = &arg0->get_shape().lens();
            const std::vector<std::size_t>* s1 = &arg1->get_shape().lens();

            // Make sure s0 is the smaller size
            if(s0->size() > s1->size())
                std::swap(s0, s1);

            std::vector<std::size_t> output_lens(*s1);
            auto offset = s1->size() - s0->size();
            std::transform(s0->begin(),
                           s0->end(),
                           s1->begin() + offset,
                           output_lens.begin() + offset,
                           [](auto a, auto b) { return std::max(a, b); });

            auto l0 = prog.add_instruction(op::multibroadcast{output_lens}, arg0);
            auto l1 = prog.add_instruction(op::multibroadcast{output_lens}, arg1);
            return prog.add_instruction(x, l0, l1);
        }
        else
        {
            return prog.add_instruction(x, {arg0, arg1});
        }
    }
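
    // Worked example of the second case above: lens (3,2,1,5) vs (2,7,5) gives
    // offset = 1, and the pairwise max over the aligned suffix (2,1,5)/(2,7,5)
    // yields output_lens = (3,2,7,5).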

    template <class T>
    void add_generic_op(std::string name, T x)
    {
        add_op(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
            return prog.add_instruction(x, args);
        });
    }

    template <class T>
    void add_variadic_op(std::string name, T x)
    {
        add_op(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
            return std::accumulate(std::next(args.begin()),
                                   args.end(),
                                   args.front(),
                                   [this, x](instruction_ref a, instruction_ref b) {
                                       return add_broadcastable_binary_op(a, b, x);
                                   });
        });
    }
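
    // e.g. ONNX Sum(a, b, c) folds left-to-right into add(add(a, b), c), with
    // each pairwise add going through the broadcasting logic above.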

    instruction_ref
    parse_softmax(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
        auto dims = args.front()->get_shape().lens();
        auto r =
            prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1]), 1, 1}}, args.front());
        auto s = prog.add_instruction(op::softmax{}, r);
        return prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1])}}, s);
    }
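
    // Note: migraphx's softmax here works on a 4-d shape, so the input is
    // reshaped to (N, C, 1, 1), softmaxed, and reshaped back. This assumes the
    // common 2-d (N, C) case of ONNX Softmax with the default axis of 1.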

    instruction_ref parse_logsoftmax(const std::string&,
                                     const attribute_map& attributes,
                                     std::vector<instruction_ref> args)
    {
        int axis = 1;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }

        return prog.add_instruction(op::logsoftmax{axis}, std::move(args));
    }

    instruction_ref
    parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::convolution op;
        auto l0 = args[0];
        if(contains(attributes, "pads"))
        {
            if(contains(attributes, "auto_pad"))
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }
            std::vector<std::int64_t> padding;
            copy(attributes["pads"].ints(), std::back_inserter(padding));
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                // insert zeros for pad op (args[0] has 4 dims)
                padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
                l0      = prog.add_instruction(op::pad{padding}, l0);
            }
            else
            {
                op.padding[0] = padding[0];
                op.padding[1] = padding[1];
            }
        }
        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
        }
        if(contains(attributes, "dilations"))
        {
            copy(attributes["dilations"].ints(), op.dilation.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(contains(attributes, "pads") and to_upper(s) != "NOTSET")
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }

            if(s.find("SAME") != std::string::npos)
            {
                op.padding_mode = op::padding_mode_t::same;
            }
        }
        if(contains(attributes, "group"))
        {
            op.group = parse_value(attributes.at("group")).at<int>();
        }
        if(args.size() == 3)
        {
            uint64_t axis = 1;
            // use l0 so any explicit asymmetric padding inserted above is kept
            auto l1       = prog.add_instruction(op, l0, args[1]);
            auto l2       = prog.add_instruction(op::broadcast{axis, l1->get_shape()}, args[2]);
            return prog.add_instruction(op::add{}, l1, l2);
        }
        return prog.add_instruction(op, l0, args[1]);
    }

    instruction_ref parse_pooling(const std::string& name,
                                  attribute_map attributes,
                                  std::vector<instruction_ref> args)
    {
        op::pooling op{ends_with(name, "MaxPool") ? "max" : "average"};
        auto l0 = args[0];
        if(starts_with(name, "Global"))
        {
            auto lens  = args.front()->get_shape().lens();
            op.lengths = {lens[2], lens[3]};
        }
        if(contains(attributes, "pads"))
        {
            std::vector<std::int64_t> padding;
            copy(attributes["pads"].ints(), std::back_inserter(padding));
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                // insert zeros for pad op (args[0] has 4 dims)
                padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
                l0      = prog.add_instruction(op::pad{padding}, l0);
            }
            else
            {
                op.padding[0] = padding[0];
                op.padding[1] = padding[1];
            }
        }
        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
        }
        if(contains(attributes, "kernel_shape"))
        {
            copy(attributes["kernel_shape"].ints(), op.lengths.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(s.find("SAME_UPPER") == std::string::npos)
            {
                MIGRAPHX_THROW("auto_pad only supports SAME_UPPER for pooling");
            }
            op.padding_mode = op::padding_mode_t::same;
        }

        return prog.add_instruction(op, l0);
    }

    instruction_ref
    parse_reshape(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::reshape op;
        if(args.size() == 1)
        {
            literal s = parse_value(attributes.at("shape"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
        }
        if(args.size() == 2)
        {
            literal s = args[1]->get_literal();
            s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
        }
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_flatten(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        uint64_t axis = 1;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }
        return prog.add_instruction(op::flatten{axis}, args[0]);
    }

    instruction_ref
    parse_squeeze(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::squeeze op;
        literal s = parse_value(attributes.at("axes"));
        s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_unsqueeze(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::unsqueeze op;
        literal s = parse_value(attributes.at("axes"));
        s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        return prog.add_instruction(op, args[0]);
    }
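
    // e.g. Unsqueeze with axes = {0} maps a (3, 4) input to (1, 3, 4), and
    // Squeeze with axes = {0} maps it back.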

    instruction_ref
    parse_concat(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::size_t axis = parse_value(attributes.at("axis")).at<int>();
        op::concat op{axis};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref
    parse_gather(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        int axis = 0;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }
        op::gather op{axis};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref
    parse_slice(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::slice op;
        if(contains(attributes, "axes"))
        {
            literal s = parse_value(attributes.at("axes"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        }
        {
            literal s = parse_value(attributes.at("ends"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.ends)); });
        }
        {
            literal s = parse_value(attributes.at("starts"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.starts)); });
        }
        return prog.add_instruction(op, args[0]);
    }
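
    // e.g. starts = {1}, ends = {3}, axes = {0} takes rows 1 and 2 of the
    // first dimension, following the ONNX Slice convention.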

    instruction_ref parse_constant(const std::string&,
                                   attribute_map attributes,
                                   const std::vector<instruction_ref>&)
    {
        literal v     = parse_value(attributes.at("value"));
        auto dim_size = attributes.at("value").t().dims_size();
        // if dim_size is 0, it is a scalar
        if(dim_size == 0)
        {
            migraphx::shape scalar_shape{v.get_shape().type()};
            return prog.add_literal(migraphx::literal{scalar_shape, v.data()});
        }

        return prog.add_literal(v);
    }

    instruction_ref
    parse_gemm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 1.0f;
        float beta  = 1.0f;
        bool transa = false;
        bool transb = false;
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        if(contains(attributes, "beta"))
        {
            beta = parse_value(attributes.at("beta")).at<float>();
        }
        if(contains(attributes, "transA"))
        {
            transa = parse_value(attributes.at("transA")).at<bool>();
        }
        if(contains(attributes, "transB"))
        {
            transb = parse_value(attributes.at("transB")).at<bool>();
        }
        std::vector<int64_t> perm = {1, 0};
        auto l1 = (transa) ? prog.add_instruction(op::transpose{perm}, args[0]) : args[0];
        auto l2 = (transb) ? prog.add_instruction(op::transpose{perm}, args[1]) : args[1];
        if(args.size() == 3)
        {
            if(beta != 0.f)
            {
                auto l3 = prog.add_instruction(op::dot{alpha}, l1, l2);
                auto l4 = args[2];
                if(l4->get_shape().scalar()) // ignore args[2] (no C value added to alpha*A*B)
                    return l3;
                if(beta != 1.f)
                {
                    auto beta_val = prog.add_literal(beta);
                    auto l5 = prog.add_instruction(op::scalar{args[2]->get_shape()}, beta_val);
                    l4      = prog.add_instruction(op::mul{}, args[2], l5);
                }
                return add_broadcastable_binary_op(l3, l4, op::add{});
            }
        }

        return prog.add_instruction(op::dot{alpha, beta}, l1, l2);
    }
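
    // Gemm computes alpha * op(A) * op(B) + beta * C per the ONNX spec, where
    // op(X) is X or its transpose depending on transA/transB; the scalar-C and
    // beta == 0 special cases above simply drop the C term.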

    instruction_ref
    parse_batchnorm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float epsilon                                     = 1e-5f;
        float momentum                                    = 0.9f;
        op::batch_norm_inference::bn_infer_mode_t bn_mode = op::batch_norm_inference::spatial;
        bool is_test                                      = false;
        if(contains(attributes, "epsilon"))
        {
            epsilon = parse_value(attributes.at("epsilon")).at<float>();
        }
        if(contains(attributes, "momentum"))
        {
            momentum = parse_value(attributes.at("momentum")).at<float>();
        }
        if(contains(attributes, "is_test"))
        {
            is_test = parse_value(attributes.at("is_test")).at<uint64_t>() > 0;
        }
        if(contains(attributes, "spatial"))
        {
            bn_mode = (parse_value(attributes.at("spatial")).at<uint64_t>() > 0)
                          ? op::batch_norm_inference::spatial
                          : op::batch_norm_inference::per_activation;
        }
        (void)is_test;
        op::batch_norm_inference op{epsilon, momentum, bn_mode};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref parse_leaky_relu(const std::string&,
                                     attribute_map attributes,
                                     std::vector<instruction_ref> args)
    {
        float alpha = 0.01; // default alpha val for leaky relu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::leaky_relu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    instruction_ref
    parse_elu(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 1.0; // default alpha val for elu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::elu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    instruction_ref
    parse_lrn(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 0.0001;
        float beta  = 0.75;
        float bias  = 1.0;
        int size    = 1;
        if(contains(attributes, "alpha"))
            alpha = parse_value(attributes.at("alpha")).at<float>();
        if(contains(attributes, "beta"))
            beta = parse_value(attributes.at("beta")).at<float>();
        if(contains(attributes, "bias"))
            bias = parse_value(attributes.at("bias")).at<float>();
        if(contains(attributes, "size"))
            size = parse_value(attributes.at("size")).at<int>();
        op::lrn op{alpha, beta, bias, size};
        return prog.add_instruction(op, args.front());
    }
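
    // Per the ONNX LRN spec: y = x / (bias + (alpha / size) * square_sum)^beta,
    // where square_sum is taken over a window of `size` nearby channels.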

    instruction_ref parse_imagescaler(const std::string&,
                                      attribute_map attributes,
                                      std::vector<instruction_ref> args)
    {
        float scale = 1.0;
        std::vector<float> bias{};
        if(contains(attributes, "scale"))
        {
            scale = parse_value(attributes.at("scale")).at<float>();
        }

        if(contains(attributes, "bias"))
        {
            auto&& bias_floats = attributes["bias"].floats();
            bias               = std::vector<float>(bias_floats.begin(), bias_floats.end());
        }
        auto input_shape = args.front()->get_shape();

        auto scale_val = prog.add_literal(scale);
        auto bias_vals = prog.add_literal(
            migraphx::literal{migraphx::shape{migraphx::shape::float_type, {bias.size()}}, bias});

        auto scale_tensor = prog.add_instruction(migraphx::op::scalar{input_shape}, scale_val);
        auto img_scaled   = prog.add_instruction(migraphx::op::mul{}, args.front(), scale_tensor);
        auto bias_bcast = prog.add_instruction(migraphx::op::broadcast{1, input_shape}, bias_vals);
        return prog.add_instruction(migraphx::op::add{}, img_scaled, bias_bcast);
    }
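
    // ImageScaler is thus lowered to y[n, c, h, w] = scale * x[n, c, h, w] + bias[c],
    // with the per-channel bias broadcast along axis 1.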

    instruction_ref
    parse_transpose(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::vector<int64_t> perm{};
        if(contains(attributes, "perm"))
        {
            auto&& perm_vals = attributes["perm"].ints();
            perm             = std::vector<int64_t>(perm_vals.begin(), perm_vals.end());
        }
        return prog.add_instruction(migraphx::op::transpose{perm}, args.front());
    }

    instruction_ref
    parse_pad(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::vector<int64_t> pads{};
        float value = 0.0f;
        if(contains(attributes, "pads"))
        {
            auto&& pad_vals = attributes["pads"].ints();
            pads            = std::vector<int64_t>(pad_vals.begin(), pad_vals.end());
        }
        if(contains(attributes, "value"))
        {
            value = parse_value(attributes.at("value")).at<float>();
        }
        if(contains(attributes, "mode"))
        {
            auto mode = attributes.at("mode").s();
            if(mode != "constant")
                MIGRAPHX_THROW("migraphx currently only supports constant padding");
        }
        return prog.add_instruction(migraphx::op::pad{pads, value}, args.front());
    }
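
    // ONNX `pads` is laid out as [x1_begin, x2_begin, ..., x1_end, x2_end, ...],
    // which is passed straight through to migraphx::op::pad here.
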
    // Use a literal instruction to replace the shape, since the output of the
    // shape operator is a literal in migraphx
    instruction_ref
    parse_shape(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
        if(args.size() != 1)
            MIGRAPHX_THROW("Shape: operator should have 1 operand");
        std::vector<std::size_t> arg_shape = args[0]->get_shape().lens();
        std::vector<int64_t> vec_shape(arg_shape.size());
        migraphx::shape s(migraphx::shape::int64_type, {arg_shape.size()});
        std::transform(arg_shape.begin(), arg_shape.end(), vec_shape.begin(), [](auto i) {
            return int64_t(i);
        });
        return prog.add_literal(migraphx::literal{s, vec_shape});
    }
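
    // e.g. an input with lens (2, 3, 5) becomes the int64 literal [2, 3, 5];
    // constant-folding the shape this way only works for static input shapes.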

    // Use a literal instruction to replace the constantFill operator. In RNN, the input
    // shape and value are fixed, so there is no need to do the actual computation for
    // the constantFill operator
    instruction_ref parse_constant_fill(const std::string&,
                                        attribute_map attributes,
                                        std::vector<instruction_ref> args)
    {
        int input_as_shape = 0;
        int dtype          = 1;
        float value        = 0.0f;

        if(contains(attributes, "dtype"))
        {
            dtype = parse_value(attributes.at("dtype")).at<int>();
        }
        migraphx::shape::type_t type = get_type(dtype);

        if(contains(attributes, "input_as_shape"))
        {
            input_as_shape = parse_value(attributes.at("input_as_shape")).at<int>();
        }

        if(contains(attributes, "value"))
        {
            value = parse_value(attributes.at("value")).at<float>();
        }

        if(contains(attributes, "extra_shape"))
        {
            MIGRAPHX_THROW("ConstantFill: cannot handle extra shape attribute");
        }

        if(input_as_shape == 1)
        {
            if(args.size() != 1)
            {
                MIGRAPHX_THROW("ConstantFill: need an input argument as output shape");
            }

            if(contains(attributes, "shape"))
            {
                MIGRAPHX_THROW("ConstantFill: cannot set the shape argument and pass in an input "
                               "at the same time");
            }

            migraphx::argument in = args[0]->eval();
            if(in.empty())
            {
                MIGRAPHX_THROW("ConstantFill: cannot handle dynamic shape as input");
            }

            std::vector<std::size_t> dims;
            in.visit([&](auto input) { dims.assign(input.begin(), input.end()); });
            migraphx::shape s(type, dims);
            std::vector<float> values(s.elements(), value);
            return prog.add_literal(migraphx::literal(s, values));
        }
        else if(input_as_shape == 0)
        {
            if(!contains(attributes, "shape"))
            {
                MIGRAPHX_THROW("ConstantFill: attribute output shape is needed");
            }

            literal ls = parse_value(attributes.at("shape"));
            std::vector<std::size_t> dims;
            ls.visit([&](auto s) { dims.assign(s.begin(), s.end()); });
            migraphx::shape s{type, dims};
            std::vector<float> values(s.elements(), value);
            return prog.add_literal(migraphx::literal(s, values));
        }
        else
        {
            MIGRAPHX_THROW("ConstantFill: wrong value of attribute input_as_shape");
        }
    }

    std::vector<instruction_ref>
    parse_rnn(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        migraphx::shape input_shape = args[0]->get_shape();
        std::size_t hidden_size     = args[1]->get_shape().lens()[1];

        if(contains(attributes, "hidden_size"))
        {
            std::size_t hidden_size_att = parse_value(attributes.at("hidden_size")).at<int>();
            if(hidden_size != hidden_size_att)
            {
                MIGRAPHX_THROW("RNN: hidden size mismatch in input and attribute");
            }
        }

        // Handling of direction to be added later
        std::string direction{"forward"};
        if(contains(attributes, "direction"))
        {
            direction = attributes.at("direction").s();
        }

        op::rnn_direction dirct = op::rnn_direction::forward;
        if(direction == "bidirectional")
        {
            dirct = op::rnn_direction::bidirectional;
        }
        else if(direction == "reverse")
        {
            dirct = op::rnn_direction::reverse;
        }

        std::vector<std::string> vec_names{"tanh"};
        if(contains(attributes, "activations"))
        {
            auto names = attributes.at("activations").strings();
            vec_names.clear();
            vec_names.resize(names.size());
            std::copy(names.begin(), names.end(), vec_names.begin());
        }

        auto name_it = std::find_if(vec_names.begin(), vec_names.end(), [&](auto& name) {
            return (map_actv_funcs.count(name) == 0);
        });
        if(name_it != vec_names.end())
        {
            MIGRAPHX_THROW("RNN: activation function " + std::string(*name_it) + " not supported");
        }

        // bidirectional case should have two activation functions.
        // one is for forward, and the other is for reverse.
        // if only one actv function is provided, we use it in both
        // forward and reverse directions
        if(dirct == op::rnn_direction::bidirectional)
        {
            if(vec_names.size() == 1)
            {
                vec_names.push_back(vec_names.at(0));
            }
        }

        std::vector<operation> vec_actv_funcs(vec_names.size());
        std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& fn) {
            return map_actv_funcs[fn];
        });

        // To be added later
        float clip = 0.0;
        if(contains(attributes, "clip"))
        {
            clip = parse_value(attributes.at("clip")).at<float>();
        }

        // if the number of arguments is less than 6, append
        // undefined operator to have 6 arguments
        if(args.size() < 6)
        {
            auto ins = prog.add_instruction(op::undefined{});
            args.insert(args.end(), (6 - args.size()), ins);
        }

        // first output for the concatenation of hidden states
        auto hidden_states = prog.add_instruction(op::rnn{hidden_size, vec_actv_funcs, dirct, clip},
                                                  std::move(args));

        // second output for the last hidden state
        auto last_output = prog.add_instruction(op::rnn_last_output{}, hidden_states);

        return {hidden_states, last_output};
    }
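
    // ONNX RNN takes up to six inputs (X, W, R, B, sequence_lens, initial_h),
    // which is why missing trailing inputs are padded with op::undefined above;
    // hidden_size is read from W, whose lens are [num_directions, hidden_size, input_size].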

    std::vector<instruction_ref>
    parse_gru(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        migraphx::shape input_shape = args[0]->get_shape();
        std::size_t hidden_size     = args[2]->get_shape().lens()[2];

        if(contains(attributes, "hidden_size"))
        {
            std::size_t hidden_size_att = parse_value(attributes.at("hidden_size")).at<int>();
            if(hidden_size != hidden_size_att)
            {
                MIGRAPHX_THROW("GRU: hidden size mismatch in input and attribute");
            }
        }

        // Handling of direction to be added later
        std::string direction{"forward"};
        if(contains(attributes, "direction"))
        {
            direction = attributes.at("direction").s();
        }

        op::rnn_direction dirct = op::rnn_direction::forward;
        if(direction == "bidirectional")
        {
            dirct = op::rnn_direction::bidirectional;
        }
        else if(direction == "reverse")
        {
            dirct = op::rnn_direction::reverse;
        }

        std::vector<std::string> vec_names = {"sigmoid", "tanh"};
        if(contains(attributes, "activations"))
        {
            auto names = attributes.at("activations").strings();
            vec_names.clear();
            vec_names.resize(names.size());
            std::copy(names.begin(), names.end(), vec_names.begin());
        }

        // need 4 activation functions
        if(dirct == op::rnn_direction::bidirectional)
        {
            // 4 activation functions are used in the bidirectional
            // scenario. No spec is provided in onnx::operator. we
            // use the algorithm that: if 1 actv function is provided,
            // repeat it four times. If 2 actv functions are provided,
            // assume forward and reverse use the same pair of actv
            // functions. For the case of 3 actv functions provided,
            // assume the 3rd one is repeated once and used by the
            // reverse direction.
            // This may need change later
            if(vec_names.size() == 1)
            {
                vec_names.insert(vec_names.end(), 3, vec_names.at(0));
            }
            else if(vec_names.size() == 2)
            {
                // repeat the activation functions
                vec_names.push_back(vec_names.at(0));
                vec_names.push_back(vec_names.at(1));
            }
            else if(vec_names.size() == 3)
            {
                vec_names.push_back(vec_names.at(2));
            }
        }
        else
        {
            if(vec_names.size() == 1)
            {
                vec_names.push_back(vec_names.at(0));
            }
        }

        auto name_it = std::find_if(vec_names.begin(), vec_names.end(), [&](auto& name) {
            return (map_actv_funcs.count(name) == 0);
        });
        if(name_it != vec_names.end())
        {
            MIGRAPHX_THROW("GRU: activation function " + std::string(*name_it) + " not supported");
        }

        std::vector<operation> vec_actv_funcs(vec_names.size());
        std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& name) {
            return map_actv_funcs[name];
        });

        float clip = 0.0;
        if(contains(attributes, "clip"))
        {
            clip = parse_value(attributes.at("clip")).at<float>();
        }

        int linear_before_reset = 0;
        if(contains(attributes, "linear_before_reset"))
        {
            linear_before_reset = parse_value(attributes.at("linear_before_reset")).at<int>();
        }

        // append undefined operator to make 6 arguments
        if(args.size() < 6)
        {
            auto ins = prog.add_instruction(op::undefined{});
            args.insert(args.end(), 6 - args.size(), ins);
        }

        // first output for concatenation of hidden states
        auto hidden_states = prog.add_instruction(
            op::gru{hidden_size, vec_actv_funcs, dirct, clip, linear_before_reset},
            std::move(args));

        // second output for last gru output
        auto last_output = prog.add_instruction(op::rnn_last_output{}, hidden_states);

        return {hidden_states, last_output};
    }
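
    // ONNX GRU uses the same six-input layout as RNN; here hidden_size comes
    // from R (args[2]), whose lens are [num_directions, 3 * hidden_size, hidden_size].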

    std::vector<instruction_ref>
    parse_lstm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        migraphx::shape input_shape = args[0]->get_shape();
        std::size_t hidden_size     = args[2]->get_shape().lens()[2];

        if(contains(attributes, "hidden_size"))
        {
            std::size_t hidden_size_att = parse_value(attributes.at("hidden_size")).at<int>();
            if(hidden_size != hidden_size_att)
            {
                MIGRAPHX_THROW("LSTM: hidden size mismatch in input and attribute");
            }
        }

        // Handling of direction to be added later
        std::string direction{"forward"};
        if(contains(attributes, "direction"))
        {
            direction = attributes.at("direction").s();
        }

        op::rnn_direction dirct = op::rnn_direction::forward;
        if(direction == "bidirectional")
        {
            dirct = op::rnn_direction::bidirectional;
        }
        else if(direction == "reverse")
        {
            dirct = op::rnn_direction::reverse;
        }
        else if(direction == "forward")
        {
            dirct = op::rnn_direction::forward;
        }
        else
        {
            MIGRAPHX_THROW("LSTM: incorrect direction attribute");
        }

        std::vector<std::string> vec_names = {"sigmoid", "tanh", "tanh"};
        if(contains(attributes, "activations"))
        {
            auto names = attributes.at("activations").strings();
            vec_names.clear();
            vec_names.resize(names.size());
            std::copy(names.begin(), names.end(), vec_names.begin());
        }

        // need 6 activation functions for bidirectional directions
        if(dirct == op::rnn_direction::bidirectional)
        {
            // 6 activation functions are used in the bidirectional
            // scenario. No spec is provided in onnx::operator. we
            // use the algorithm that: if 1 actv function is provided,
            // repeat the 1st six times. If 2 actv functions are provided,
            // repeat the 2nd once, then repeat all three once.
            // if 3 actv funcs are provided, repeat all three once.
            // the same algorithm is used for 4, 5, and 6 actv functions
            // provided. This may need change later
            switch(vec_names.size())
            {
            case 1:
                vec_names = {vec_names.at(0),
                             vec_names.at(0),
                             vec_names.at(0),
                             vec_names.at(0),
                             vec_names.at(0),
                             vec_names.at(0)};
                break;

            case 2:
                // repeat the 2nd actv func once, then repeat all three another time
                vec_names = {vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(1),
                             vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(1)};
                break;

            case 3:
                // repeat all three actv funcs once
                vec_names = {vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(2),
                             vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(2)};
                break;

            case 4:
                vec_names = {vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(2),
                             vec_names.at(3),
                             vec_names.at(3),
                             vec_names.at(3)};
                break;

            case 5:
                vec_names = {vec_names.at(0),
                             vec_names.at(1),
                             vec_names.at(2),
                             vec_names.at(3),
                             vec_names.at(4),
                             vec_names.at(4)};
                break;

            default: break;
            }
        }
        else
        {
            switch(vec_names.size())
            {
            case 1: vec_names = {vec_names.at(0), vec_names.at(0), vec_names.at(0)}; break;

            case 2:
                // repeat the 2nd actv func once, so we have 3 actv funcs
                vec_names = {vec_names.at(0), vec_names.at(1), vec_names.at(1)};
                break;

            default: break;
            }
        }

        auto name_it = std::find_if(vec_names.begin(), vec_names.end(), [&](auto& name) {
            return (map_actv_funcs.count(name) == 0);
        });
        if(name_it != vec_names.end())
        {
            MIGRAPHX_THROW("LSTM: activation function " + std::string(*name_it) + " not supported");
        }

        std::vector<operation> vec_actv_funcs(vec_names.size());
        std::transform(vec_names.begin(), vec_names.end(), vec_actv_funcs.begin(), [&](auto& name) {
            return map_actv_funcs[name];
        });

        float clip = 0.0;
        if(contains(attributes, "clip"))
        {
            clip = parse_value(attributes.at("clip")).at<float>();
        }

        int input_forget = 0;
        if(contains(attributes, "input_forget"))
        {
            input_forget = parse_value(attributes.at("input_forget")).at<int>();
        }

        // append undefined operator to make 8 arguments
        if(args.size() < 8)
        {
            auto ins = prog.add_instruction(op::undefined{});
            args.insert(args.end(), 8 - args.size(), ins);
        }

        // first output for concatenation of hidden states
        auto hidden_states = prog.add_instruction(
            op::lstm{hidden_size, vec_actv_funcs, dirct, clip, input_forget}, std::move(args));

        // second output for last lstm output
        auto last_output = prog.add_instruction(op::rnn_last_output{}, hidden_states);

        // third output for last cell output
        auto last_cell_output = prog.add_instruction(op::lstm_last_cell_output{}, hidden_states);

        return {hidden_states, last_output, last_cell_output};
    }
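
    // ONNX LSTM takes up to eight inputs (X, W, R, B, sequence_lens, initial_h,
    // initial_c, P for peepholes), hence the padding to 8 arguments above.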

    void parse_from(std::istream& is)
    {
        onnx::ModelProto model;
        if(model.ParseFromIstream(&is))
        {
            if(model.has_graph())
            {
                this->parse_graph(model.graph());
            }
        }
        else
        {
            MIGRAPHX_THROW("Failed reading onnx file.");
        }
    }

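    // Lower an ONNX graph: initializers become literals, the remaining graph
    // inputs become parameters, and every node is then parsed (parse_node
    // recurses to satisfy data dependencies first).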
    void parse_graph(const onnx::GraphProto& graph)
    {
        nodes = get_nodes(graph);
        std::unordered_map<std::string, onnx::TensorProto> initializer_data;
        for(auto&& f : graph.initializer())
        {
            initializer_data[f.name()] = f;
        }
        for(auto&& input : graph.input())
        {
            const std::string& name = input.name();
            // Does the input have an initializer?
            if(contains(initializer_data, name))
            {
                auto t             = initializer_data[name];
                instructions[name] = prog.add_literal(parse_tensor(t));
            }
            else
            {
                // TODO: Get shape of input parameter
                shape s            = parse_type(input.type());
                instructions[name] = prog.add_parameter(name, s);
            }
        }
        for(auto&& p : nodes)
        {
            this->parse_node(p.first);
        }
    }

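    // ONNX marks an omitted optional input with an empty name; bind it to an
    // undefined placeholder instruction so argument positions still line up.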
    void parse_undefined(const std::string& name)
    {
        auto ins           = prog.add_instruction(op::undefined{});
        instructions[name] = ins;
    }

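    // Parse a single node, first recursing into any producer nodes that have
    // not been lowered yet. Results are memoized in the instructions map, so
    // each node is converted at most once.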
    void parse_node(const std::string& name)
    {
        if(name.empty())
            MIGRAPHX_THROW("Onnx node must have a name");
        if(instructions.count(name) == 0)
        {
            auto&& node = nodes.at(name);
            std::vector<instruction_ref> args;
            for(auto&& input : node.input())
            {
                if(nodes.count(input) > 0)
                {
                    assert(name != input);
                    this->parse_node(input);
                }
                else if(input.empty())
                {
                    this->parse_undefined(input);
                }
                args.push_back(instructions.at(input));
            }
            std::vector<instruction_ref> result;
            if(ops.count(node.op_type()) == 0)
            {
                result.push_back(prog.add_instruction(unknown{node.op_type()}, args));
            }
            else
            {
                result = ops[node.op_type()](get_attributes(node), args);
            }
            // Even nodes with no declared outputs produce a result in migraphx
            if(node.output().empty() and result.size() == 1)
1206
            {
                instructions[name] = result.front();
            }
            else
            {
                assert(node.output().size() >= result.size());
                std::transform(result.begin(),
                               result.end(),
                               node.output().begin(),
                               std::inserter(instructions, instructions.end()),
                               [](auto&& x, auto&& y) { return std::make_pair(y, x); });
            }
        }
    }

    static attribute_map get_attributes(const onnx::NodeProto& node)
    {
        std::unordered_map<std::string, onnx::AttributeProto> result;
        for(auto&& attr : node.attribute())
        {
            result[attr.name()] = attr;
        }
        return result;
    }

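    // Index the graph's nodes by each of their output names (and by node name,
    // or a generated one, for nodes that declare no outputs).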
    static node_map get_nodes(const onnx::GraphProto& graph)
    {
        std::unordered_map<std::string, onnx::NodeProto> result;
        std::size_t n = 0;
        for(auto&& node : graph.node())
        {
            if(node.output().empty())
            {
                if(node.name().empty())
                {
                    result["migraphx_unamed_node_" + std::to_string(n)] = node;
                    n++;
                }
                else
                {
                    result[node.name()] = node;
                }
            }
            for(auto&& output : node.output())
            {
                result[output] = node;
            }
        }
        return result;
    }

    template <class T>
    static literal from_repeated(shape::type_t t, const T& r)
    {
        std::size_t size = r.size();
        return literal{{t, {size}}, r.begin(), r.end()};
    }

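    // Convert an attribute to a literal. Only numeric scalars, numeric lists,
    // and tensors carry data; string and graph attributes yield an empty
    // literal.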
    static literal parse_value(const onnx::AttributeProto& attr)
    {
        switch(attr.type())
        {
        case onnx::AttributeProto::UNDEFINED: return {};
        case onnx::AttributeProto::FLOAT: return literal{attr.f()};
        case onnx::AttributeProto::INT: return literal{attr.i()};
        case onnx::AttributeProto::STRING: return {};
        case onnx::AttributeProto::TENSOR: return parse_tensor(attr.t());
        case onnx::AttributeProto::GRAPH: return {};
        case onnx::AttributeProto::FLOATS: return from_repeated(shape::float_type, attr.floats());
        case onnx::AttributeProto::INTS: return from_repeated(shape::int64_type, attr.ints());
        case onnx::AttributeProto::STRINGS: return {};
        case onnx::AttributeProto::TENSORS: return {};
        case onnx::AttributeProto::GRAPHS: return {};
        }
        MIGRAPHX_THROW("Invalid attribute type");
    }

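    // TensorProto stores its payload either as packed bytes in raw_data or in
    // the typed repeated fields (float_data, int32_data, ...); handle both.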
    static literal parse_tensor(const onnx::TensorProto& t)
    {
        std::vector<std::size_t> dims(t.dims().begin(), t.dims().end());
        // scalar constants in the onnx file have empty dims; use dims = {1} so
        // the literal gets a valid shape
        if(dims.empty())
        {
            dims = {1};
        }
        if(t.has_raw_data())
        {
            const std::string& s = t.raw_data();
            switch(t.data_type())
            {
            case onnx::TensorProto::UNDEFINED:
                MIGRAPHX_THROW("Unsupported tensor type UNDEFINED");
            case onnx::TensorProto::FLOAT: return literal{{shape::float_type, dims}, s.data()};
            case onnx::TensorProto::UINT8: MIGRAPHX_THROW("Unsupported tensor type UINT8");
            case onnx::TensorProto::INT8: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::UINT16: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT16: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT32: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT64: return literal{{shape::int64_type, dims}, s.data()};
            case onnx::TensorProto::STRING: MIGRAPHX_THROW("Unsupported tensor type STRING");
            case onnx::TensorProto::BOOL: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::FLOAT16: return literal{{shape::half_type, dims}, s.data()};
            case onnx::TensorProto::DOUBLE: return literal{{shape::double_type, dims}, s.data()};
            case onnx::TensorProto::UINT32: MIGRAPHX_THROW("Unsupported tensor type UINT32");
            case onnx::TensorProto::UINT64: MIGRAPHX_THROW("Unsupported tensor type UINT64");
            case onnx::TensorProto::COMPLEX64: MIGRAPHX_THROW("Unsupported tensor type COMPLEX64");
            case onnx::TensorProto::COMPLEX128:
                MIGRAPHX_THROW("Unsupported tensor type COMPLEX128");
            }
            MIGRAPHX_THROW("Invalid tensor type");
        }
        switch(t.data_type())
        {
        case onnx::TensorProto::UNDEFINED: MIGRAPHX_THROW("Unsupported tensor type UNDEFINED");
        case onnx::TensorProto::FLOAT:
            return literal{{shape::float_type, dims}, t.float_data().begin(), t.float_data().end()};
        case onnx::TensorProto::UINT8: MIGRAPHX_THROW("Unsupported tensor type UINT8");
        case onnx::TensorProto::INT8:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::UINT16:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT16:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT32:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT64:
            return literal{{shape::int64_type, dims}, t.int64_data().begin(), t.int64_data().end()};
        case onnx::TensorProto::STRING: MIGRAPHX_THROW("Unsupported tensor type STRING");
        case onnx::TensorProto::BOOL:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::FLOAT16:
        {
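            // fp16 payloads arrive as the low 16 bits of int32_data entries;
            // reinterpret each bit pattern as a half value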
            std::vector<uint16_t> data_uint16(t.int32_data().begin(), t.int32_data().end());
            std::vector<half> data_half;
            std::transform(data_uint16.begin(),
                           data_uint16.end(),
                           std::back_inserter(data_half),
                           [](uint16_t raw_val) { return *reinterpret_cast<half*>(&raw_val); });
            return literal{{shape::half_type, dims}, data_half.begin(), data_half.end()};
        }
        case onnx::TensorProto::DOUBLE:
            return literal{
                {shape::double_type, dims}, t.double_data().begin(), t.double_data().end()};
        case onnx::TensorProto::UINT32: MIGRAPHX_THROW("Unsupported tensor type UINT32");
        case onnx::TensorProto::UINT64: MIGRAPHX_THROW("Unsupported tensor type UINT64");
        case onnx::TensorProto::COMPLEX64: MIGRAPHX_THROW("Unsupported tensor type COMPLEX64");
        case onnx::TensorProto::COMPLEX128: MIGRAPHX_THROW("Unsupported tensor type COMPLEX128");
        }
        MIGRAPHX_THROW("Invalid tensor type");
    }

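    // Map an ONNX element type to a migraphx shape type. Unknown dimensions
    // default to 1 (see the FIXME below); unsupported element types fall
    // through with a default-constructed type.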
    static shape parse_type(const onnx::TypeProto& t)
    {
        shape::type_t shape_type{};
        switch(t.tensor_type().elem_type())
        {
        case onnx::TensorProto::UNDEFINED:
            break; // throw std::runtime_error("Unsupported type UNDEFINED");
        case onnx::TensorProto::FLOAT: shape_type = shape::float_type; break;
        case onnx::TensorProto::UINT8:
            break; // throw std::runtime_error("Unsupported type UINT8");
        case onnx::TensorProto::INT8: shape_type = shape::int8_type; break;
        case onnx::TensorProto::UINT16: shape_type = shape::uint16_type; break;
        case onnx::TensorProto::INT16: shape_type = shape::int16_type; break;
        case onnx::TensorProto::INT32: shape_type = shape::int32_type; break;
        case onnx::TensorProto::INT64: shape_type = shape::int64_type; break;
        case onnx::TensorProto::STRING:
            break; // throw std::runtime_error("Unsupported type STRING");
        case onnx::TensorProto::BOOL:
            break; // throw std::runtime_error("Unsupported type BOOL");
        case onnx::TensorProto::FLOAT16: shape_type = shape::half_type; break;
        case onnx::TensorProto::DOUBLE: shape_type = shape::double_type; break;
        case onnx::TensorProto::UINT32: shape_type = shape::uint32_type; break;
        case onnx::TensorProto::UINT64: shape_type = shape::uint64_type; break;
        case onnx::TensorProto::COMPLEX64:
            break; // throw std::runtime_error("Unsupported type COMPLEX64");
        case onnx::TensorProto::COMPLEX128:
            break; // throw std::runtime_error("Unsupported type COMPLEX128");
        }
        std::vector<std::size_t> dims;
        auto&& tensor_dims = t.tensor_type().shape().dim();
        std::transform(tensor_dims.begin(),
                       tensor_dims.end(),
                       std::back_inserter(dims),
                       [](auto&& d) -> std::size_t {
                           if(not d.has_dim_value())
                           {
                               long default_batch_size = 1; // FIXME
                               return default_batch_size;
                           }
                           return d.dim_value();
                       });
        return {shape_type, dims};
    }

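    // dtype codes follow onnx::TensorProto::DataType
    // (e.g. 1 == FLOAT, 7 == INT64, 10 == FLOAT16)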
    shape::type_t get_type(int dtype)
    {
        switch(dtype)
        {
        case 1: return shape::float_type;
        case 2: return shape::uint8_type;
        case 3: return shape::int8_type;
        case 4: return shape::uint16_type;
        case 5: return shape::int16_type;
        case 6: return shape::int32_type;
        case 7: return shape::int64_type;
        case 10: return shape::half_type;
        case 11: return shape::double_type;
        case 12: return shape::uint32_type;
        case 13: return shape::uint64_type;
        default:
        {
            MIGRAPHX_THROW("Prototensor data type " + std::to_string(dtype) + " not supported");
        }
        }
    }
};

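// Example usage (a minimal sketch; "model.onnx" is a hypothetical path):
//   migraphx::program p = migraphx::parse_onnx("model.onnx");
//   std::cout << p << std::endl;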
program parse_onnx(const std::string& name)
{
    std::fstream input(name.c_str(), std::ios::in | std::ios::binary);
    onnx_parser parser;
#ifndef NDEBUG
    // Log the program when it can't be parsed
    try
    {
        parser.parse_from(input);
    }
    catch(...)
    {
        std::cerr << parser.prog << std::endl;
        throw;
    }
#else
    parser.parse_from(input);
#endif
    return std::move(parser.prog);
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx