onnx.cpp
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <onnx.pb.h>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <functional>
#include <array>
#include <utility>
#include <vector>
#include <algorithm>
#include <numeric>

#include <migraphx/fallthrough.hpp>
#include <migraphx/program.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/config.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
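
// Fallback operator: any ONNX node whose op_type has no registered parser is
// wrapped in `unknown`, so the rest of the graph can still be built and the
// unsupported op is visible when the program is printed.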
struct unknown
{
    std::string op;
    std::string name() const { return "unknown:" + op; }
    shape compute_shape(std::vector<shape> input) const
    {
        if(input.empty())
            return {};
        else
            return input.front();
    }
    friend std::ostream& operator<<(std::ostream& os, const unknown& x)
    {
        os << x.name();
        return os;
    }
};

struct onnx_parser
{
    using attribute_map = std::unordered_map<std::string, onnx::AttributeProto>;
    using node_map      = std::unordered_map<std::string, onnx::NodeProto>;
    using op_func = std::function<instruction_ref(attribute_map, std::vector<instruction_ref>)>;
    node_map nodes;
    std::unordered_map<std::string, instruction_ref> instructions;
    program prog    = program();
    bool is_pytorch = false;

    std::unordered_map<std::string, op_func> ops;

    onnx_parser()
    {
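        // Register the supported ONNX operators: generic ops map one-to-one
        // onto a migraphx operator, broadcastable binary ops get numpy-style
        // broadcasting, and the remaining ops need custom attribute handling.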
        add_generic_op("MatMul", op::dot{});
        add_generic_op("Relu", op::relu{});
        add_generic_op("Sigmoid", op::sigmoid{});
        add_generic_op("Abs", op::abs{});
        add_generic_op("Exp", op::exp{});
        add_generic_op("Log", op::log{});
        // disable dropout for inference
        add_generic_op("Dropout", op::identity{});
        add_generic_op("Identity", op::identity{});
        add_generic_op("Sin", op::sin{});
        add_generic_op("Cos", op::cos{});
        add_generic_op("Tan", op::tan{});
        add_generic_op("Sinh", op::sinh{});
        add_generic_op("Cosh", op::cosh{});
        add_generic_op("Tanh", op::tanh{});
        add_generic_op("Asin", op::asin{});
        add_generic_op("Acos", op::acos{});
        add_generic_op("Atan", op::atan{});

        add_broadcastable_binary_op("Add", op::add{});
        add_broadcastable_binary_op("Div", op::div{});
        add_broadcastable_binary_op("Mul", op::mul{});
        add_broadcastable_binary_op("Sub", op::sub{});
        add_broadcastable_binary_op("Sum", op::add{});

        add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
        add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
        add_mem_op("Elu", &onnx_parser::parse_elu);
        add_mem_op("Constant", &onnx_parser::parse_constant);
        add_mem_op("Conv", &onnx_parser::parse_conv);
        add_mem_op("MaxPool", &onnx_parser::parse_pooling);
        add_mem_op("AveragePool", &onnx_parser::parse_pooling);
        add_mem_op("GlobalMaxPool", &onnx_parser::parse_pooling);
        add_mem_op("GlobalAveragePool", &onnx_parser::parse_pooling);
        add_mem_op("Reshape", &onnx_parser::parse_reshape);
        add_mem_op("Flatten", &onnx_parser::parse_flatten);
        add_mem_op("Gemm", &onnx_parser::parse_gemm);
        add_mem_op("BatchNormalization", &onnx_parser::parse_batchnorm);
        add_mem_op("Softmax", &onnx_parser::parse_softmax);
        add_mem_op("Squeeze", &onnx_parser::parse_squeeze);
        add_mem_op("Unsqueeze", &onnx_parser::parse_unsqueeze);
        add_mem_op("Slice", &onnx_parser::parse_slice);
        add_mem_op("Concat", &onnx_parser::parse_concat);
        add_mem_op("Transpose", &onnx_parser::parse_transpose);
    }

    template <class F>
    void add_op(std::string name, F f)
    {
        ops.emplace(name, f);
    }

    template <class F>
    void add_mem_op(std::string name, F f)
    {
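        // Wrap the member function so the op name is forwarded as the first
        // argument and the callable fits the common op_func signature.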
        ops.emplace(name, [=](auto&&... xs) {
            return std::mem_fn(f)(*this, name, std::forward<decltype(xs)>(xs)...);
        });
    }
    template <class T>
    void add_broadcastable_binary_op(std::string name, T x)
    {
        ops.emplace(name, [this, x](attribute_map attributes, std::vector<instruction_ref> args) {
            if(args.size() != 2)
                MIGRAPHX_THROW("binary operators should have 2 operands");
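            // Opsets before 7 request broadcasting explicitly through a
            // "broadcast" attribute with an optional "axis"; later opsets
            // rely on the numpy-style rules handled further below.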
            if(contains(attributes, "broadcast"))
            {
                uint64_t broadcasted = parse_value(attributes.at("broadcast")).at<uint64_t>();
                if(broadcasted != 0)
                {
                    uint64_t axis = (contains(attributes, "axis"))
                                        ? parse_value(attributes.at("axis")).at<uint64_t>()
                                        : 0;
                    auto l =
                        prog.add_instruction(op::broadcast{axis, args[0]->get_shape()}, args[1]);
                    return prog.add_instruction(x, args[0], l);
                }
                return prog.add_instruction(x, args);
            }
            else if(args[0]->get_shape() != args[1]->get_shape())
            {
                // Example:
                // s0 = (3,2,4,5) and s1 = (2,1,1)
                //
                // In this case we need to broadcast (:,1,1) portion of
                // s1 plus broadcast the 1st dimension of s1
                // giving output_lens = (3,2,4,5)
                //
                // Another example:
                // s0 = (3,2,1,5) and s1 = (2,7,5)
                // In this case we need to broadcast the (:,:,1:,:) axis
                // of s0 plus the 1st dimension of s1 giving
                // output_lens = (3,2,7,5)
                //
                // Get lengths for both arguments
                const std::vector<std::size_t>* s0 = &args[0]->get_shape().lens();
                const std::vector<std::size_t>* s1 = &args[1]->get_shape().lens();

                // Make sure s0 is the smaller size
                if(s0->size() > s1->size())
                    std::swap(s0, s1);

                // Copy the larger vector to output_lens
                std::vector<std::size_t> output_lens = *s1;
                auto offset                          = s1->size() - s0->size();
                std::transform(s0->begin(),
                               s0->end(),
                               s1->begin() + offset,
                               output_lens.begin() + offset,
                               [](auto a, auto b) { return std::max(a, b); });

                auto l0 = prog.add_instruction(op::multibroadcast{output_lens}, args[0]);
                auto l1 = prog.add_instruction(op::multibroadcast{output_lens}, args[1]);
                return prog.add_instruction(x, l0, l1);
            }
            else
            {
                return prog.add_instruction(x, args);
            }
        });
    }

    template <class T>
    void add_generic_op(std::string name, T x)
    {
        ops.emplace(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
            return prog.add_instruction(x, args);
        });
    }

    instruction_ref
    parse_softmax(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
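        // The softmax operator here works on 4-d shapes, so a 2-d (batch,
        // classes) input is reshaped to (batch, classes, 1, 1) and back.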
        auto dims = args.front()->get_shape().lens();
        auto r =
            prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1]), 1, 1}}, args.front());
        auto s = prog.add_instruction(op::softmax{}, r);
        return prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1])}}, s);
    }

    instruction_ref
    parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::convolution op;
        if(contains(attributes, "pads"))
        {
            if(contains(attributes, "auto_pad"))
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }
            std::vector<std::size_t> padding;
            copy(attributes["pads"].ints(), std::back_inserter(padding));
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                MIGRAPHX_THROW("migraphx does not support asymmetric padding");
            }
            op.padding[0] = padding[0];
            op.padding[1] = padding[1];
        }
        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
        }
        if(contains(attributes, "dilations"))
        {
            copy(attributes["dilations"].ints(), op.dilation.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(contains(attributes, "pads") and to_upper(s) != "NOTSET")
            {
                MIGRAPHX_THROW("auto_pad and padding cannot be specified simultaneously");
            }

            if(s.find("SAME") != std::string::npos)
            {
                op.padding_mode = op::convolution::same;
            }
        }
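
        // A third input, when present, is the bias; broadcast it along the
        // channel axis (axis 1 in NCHW) and add it to the convolution output.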
        if(args.size() == 3)
        {
            uint64_t axis = 1;
            auto l1       = prog.add_instruction(op, args[0], args[1]);
            auto l2       = prog.add_instruction(op::broadcast{axis, l1->get_shape()}, args[2]);
            return prog.add_instruction(op::add{}, l1, l2);
        }
        return prog.add_instruction(op, args);
    }

    instruction_ref parse_pooling(const std::string& name,
                                  attribute_map attributes,
                                  std::vector<instruction_ref> args)
    {
        op::pooling op{ends_with(name, "MaxPool") ? "max" : "average"};
        if(starts_with(name, "Global"))
        {
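            // Global pooling uses a window covering the full spatial extent
            // (H, W) of the input.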
            auto lens  = args.front()->get_shape().lens();
            op.lengths = {lens[2], lens[3]};
        }
        if(contains(attributes, "pads"))
        {
            std::vector<std::size_t> padding;
            copy(attributes["pads"].ints(), std::back_inserter(padding));
            if(padding.size() != 4)
            {
                MIGRAPHX_THROW("padding should have 4 values");
            }
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                MIGRAPHX_THROW("migraphx does not support asymmetric padding");
            }
            op.padding[0] = padding[0];
            op.padding[1] = padding[1];
        }
        if(contains(attributes, "strides"))
        {
            copy(attributes["strides"].ints(), op.stride.begin());
        }
        if(contains(attributes, "kernel_shape"))
        {
            copy(attributes["kernel_shape"].ints(), op.lengths.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(to_upper(s) != "NOTSET")
            {
                MIGRAPHX_THROW("auto_pad is not supported for pooling");
            }
        }

        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref
    parse_reshape(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::reshape op;
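        // The target shape comes either from a "shape" attribute (Reshape-1)
        // or, with two inputs, from a constant second argument (Reshape-5).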
        if(args.size() == 1)
        {
            literal s = parse_value(attributes.at("shape"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
        }
        if(args.size() == 2)
        {
            literal s = args[1]->get_literal();
            s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
        }
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_flatten(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        uint64_t axis = 0;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }
        return prog.add_instruction(op::flatten{axis}, args[0]);
    }

    instruction_ref
    parse_squeeze(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::squeeze op;
        literal s = parse_value(attributes.at("axes"));
        s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_unsqueeze(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::unsqueeze op;
        literal s = parse_value(attributes.at("axes"));
        s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref
    parse_concat(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::size_t axis = parse_value(attributes.at("axis")).at<int>();
        op::concat op{axis};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref
    parse_slice(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        op::slice op;
        if(contains(attributes, "axes"))
        {
            literal s = parse_value(attributes.at("axes"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
        }
        {
            literal s = parse_value(attributes.at("ends"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.ends)); });
        }
        {
            literal s = parse_value(attributes.at("starts"));
            s.visit([&](auto v) { copy(v, std::back_inserter(op.starts)); });
        }
        return prog.add_instruction(op, args[0]);
    }

    instruction_ref parse_constant(const std::string&,
                                   attribute_map attributes,
                                   const std::vector<instruction_ref>&)
    {
        literal v = parse_value(attributes.at("value"));
        return prog.add_literal(v);
    }

    instruction_ref
    parse_gemm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
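        // ONNX Gemm computes alpha * op(A) * op(B) + beta * C, where op() is
        // an optional transpose selected by the transA/transB attributes.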
        float alpha = 1.0f;
        float beta  = 0.0f;
        bool transa = false;
        bool transb = false;
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        if(contains(attributes, "beta"))
        {
            beta = parse_value(attributes.at("beta")).at<float>();
        }
        if(contains(attributes, "transA"))
        {
            transa = parse_value(attributes.at("transA")).at<bool>();
        }
        if(contains(attributes, "transB"))
        {
            transb = parse_value(attributes.at("transB")).at<bool>();
        }
        std::vector<int64_t> perm = {1, 0};
        auto l1 = (transa) ? prog.add_instruction(op::transpose{perm}, args[0]) : args[0];
        auto l2 = (transb) ? prog.add_instruction(op::transpose{perm}, args[1]) : args[1];
        if(args.size() == 3)
        {
            uint64_t axis = 1;
            auto l3       = prog.add_instruction(op::dot{alpha, beta}, l1, l2);
            auto l4       = prog.add_instruction(op::broadcast{axis, l3->get_shape()}, args[2]);
            return prog.add_instruction(op::add{}, l3, l4);
        }
        return prog.add_instruction(op::dot{alpha, beta}, l1, l2);
    }

    instruction_ref
    parse_batchnorm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
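        // Only inference-mode batch normalization is built; "is_test" is
        // parsed for compatibility with older models but otherwise ignored.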
        float epsilon                                     = 1e-5f;
        float momentum                                    = 0.9f;
        op::batch_norm_inference::bn_infer_mode_t bn_mode = op::batch_norm_inference::spatial;
        bool is_test                                      = false;
        if(contains(attributes, "epsilon"))
        {
            epsilon = parse_value(attributes.at("epsilon")).at<float>();
        }
        if(contains(attributes, "momentum"))
        {
            momentum = parse_value(attributes.at("momentum")).at<float>();
        }
        if(contains(attributes, "is_test"))
        {
            is_test = parse_value(attributes.at("is_test")).at<uint64_t>() > 0;
        }
        if(contains(attributes, "spatial"))
        {
            bn_mode = (parse_value(attributes.at("spatial")).at<uint64_t>() > 0)
                          ? op::batch_norm_inference::spatial
                          : op::batch_norm_inference::per_activation;
        }
        (void)is_test;
        op::batch_norm_inference op{epsilon, momentum, bn_mode};
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref parse_leaky_relu(const std::string&,
                                     attribute_map attributes,
                                     std::vector<instruction_ref> args)
    {
        float alpha = 0.01; // default alpha val for leaky relu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::leaky_relu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    instruction_ref
    parse_elu(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        float alpha = 1.0; // default alpha val for elu
        if(contains(attributes, "alpha"))
        {
            alpha = parse_value(attributes.at("alpha")).at<float>();
        }
        op::elu op{alpha};
        return prog.add_instruction(op, args.front());
    }

    instruction_ref parse_imagescaler(const std::string&,
                                      attribute_map attributes,
                                      std::vector<instruction_ref> args)
    {
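        // ImageScaler computes scale * input + per-channel bias; it is
        // lowered to a scalar multiply followed by a broadcast add.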
        float scale = 1.0;
        std::vector<float> bias{};
        if(contains(attributes, "scale"))
        {
            scale = parse_value(attributes.at("scale")).at<float>();
        }

        if(contains(attributes, "bias"))
        {
            auto&& bias_floats = attributes["bias"].floats();
            bias               = std::vector<float>(bias_floats.begin(), bias_floats.end());
        }
        auto input_shape = args.front()->get_shape();

        auto scale_val = prog.add_literal(scale);
        auto bias_vals = prog.add_literal(
            migraphx::literal{migraphx::shape{migraphx::shape::float_type, {bias.size()}}, bias});

        auto scale_tensor = prog.add_instruction(migraphx::op::scalar{input_shape}, scale_val);
        auto img_scaled   = prog.add_instruction(migraphx::op::mul{}, args.front(), scale_tensor);
        auto bias_bcast = prog.add_instruction(migraphx::op::broadcast{1, input_shape}, bias_vals);
        return prog.add_instruction(migraphx::op::add{}, img_scaled, bias_bcast);
    }

    instruction_ref
    parse_transpose(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::vector<int64_t> perm{};
        if(contains(attributes, "perm"))
        {
            auto&& perm_vals = attributes["perm"].ints();
            perm             = std::vector<int64_t>(perm_vals.begin(), perm_vals.end());
        }
        return prog.add_instruction(migraphx::op::transpose{perm}, args.front());
    }

    void parse_from(std::istream& is)
    {
        onnx::ModelProto model;
        if(model.ParseFromIstream(&is))
        {
            if(model.has_graph())
            {
                this->parse_graph(model.graph());
            }
        }
        else
        {
            MIGRAPHX_THROW("Failed reading onnx file");
        }
    }

    void parse_graph(const onnx::GraphProto& graph)
    {
        nodes = get_nodes(graph);
        std::unordered_map<std::string, onnx::TensorProto> initializer_data;
        for(auto&& f : graph.initializer())
        {
            initializer_data[f.name()] = f;
        }
        for(auto&& input : graph.input())
        {
            const std::string& name = input.name();
            // Does the input have an initializer?
            if(contains(initializer_data, name))
            {
                auto t             = initializer_data[name];
                instructions[name] = prog.add_literal(parse_tensor(t));
            }
            else
            {
                // TODO: Get shape of input parameter
                shape s            = parse_type(input.type());
                instructions[name] = prog.add_parameter(name, s);
            }
        }
        for(auto&& p : nodes)
        {
            this->parse_node(get_name(p.second));
        }
    }

    void parse_node(const std::string& name)
    {
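        // Nodes are parsed on demand: any input produced by another node is
        // parsed first (depth-first), so instructions are emitted in
        // topological order.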
        if(name.empty())
            MIGRAPHX_THROW("Onnx node must have a name");
        if(instructions.count(name) == 0)
        {
            auto&& node = nodes.at(name);
            std::vector<instruction_ref> args;
            for(auto&& input : node.input())
            {
                if(nodes.count(input) > 0)
                {
                    auto&& iname = get_name(nodes.at(input));
                    assert(name != iname);
                    this->parse_node(iname);
                    args.push_back(instructions.at(iname));
                }
                else
                {
                    args.push_back(instructions.at(input));
                }
            }
            if(ops.count(node.op_type()) == 0)
            {
                instructions[name] = prog.add_instruction(unknown{node.op_type()}, args);
            }
            else
            {
                instructions[name] = ops[node.op_type()](get_attributes(node), args);
            }
        }
    }

    static attribute_map get_attributes(const onnx::NodeProto& node)
    {
        std::unordered_map<std::string, onnx::AttributeProto> result;
        for(auto&& attr : node.attribute())
        {
            result[attr.name()] = attr;
        }
        return result;
    }

    static std::string get_name(const onnx::NodeProto& node)
    {
        if(node.name().empty())
        {
            std::string generated = "migraphx_unnamed_node";
            return std::accumulate(node.output().begin(),
                                   node.output().end(),
                                   generated,
                                   [](auto x, auto y) { return x + "_" + y; });
        }
        return node.name();
    }

    static node_map get_nodes(const onnx::GraphProto& graph)
    {
        std::unordered_map<std::string, onnx::NodeProto> result;
        for(auto&& node : graph.node())
        {
            result[get_name(node)] = node;
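            // Also index the node under each of its output names so that
            // edges expressed as tensor names resolve to the producing node.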
            for(auto&& output : node.output())
            {
                result[output] = node;
            }
        }
        return result;
    }

    template <class T>
    static literal from_repeated(shape::type_t t, const T& r)
    {
        std::size_t size = r.size();
        return literal{{t, {size}}, r.begin(), r.end()};
    }

    static literal parse_value(const onnx::AttributeProto& attr)
    {
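        // Attribute kinds with no literal representation (strings, graphs)
        // yield an empty literal.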
        switch(attr.type())
        {
        case onnx::AttributeProto::UNDEFINED: return {};
        case onnx::AttributeProto::FLOAT: return literal{attr.f()};
        case onnx::AttributeProto::INT: return literal{attr.i()};
        case onnx::AttributeProto::STRING: return {};
        case onnx::AttributeProto::TENSOR: return parse_tensor(attr.t());
        case onnx::AttributeProto::GRAPH: return {};
        case onnx::AttributeProto::FLOATS: return from_repeated(shape::float_type, attr.floats());
        case onnx::AttributeProto::INTS: return from_repeated(shape::int64_type, attr.ints());
        case onnx::AttributeProto::STRINGS: return {};
        case onnx::AttributeProto::TENSORS: return {};
        case onnx::AttributeProto::GRAPHS: return {};
        }
        MIGRAPHX_THROW("Invalid attribute type");
    }

    static literal parse_tensor(const onnx::TensorProto& t)
    {
        std::vector<std::size_t> dims(t.dims().begin(), t.dims().end());
        if(t.has_raw_data())
        {
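            // raw_data holds the packed little-endian bytes of the tensor;
            // types without a direct migraphx equivalent (int8, int16, bool)
            // are mapped to int32_type here.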
            const std::string& s = t.raw_data();
            switch(t.data_type())
            {
            case onnx::TensorProto::UNDEFINED: throw std::runtime_error("Unsupported tensor type: undefined");
            case onnx::TensorProto::FLOAT: return literal{{shape::float_type, dims}, s.data()};
            case onnx::TensorProto::UINT8: throw std::runtime_error("Unsupported tensor type: uint8");
            case onnx::TensorProto::INT8: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::UINT16: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT16: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT32: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::INT64: return literal{{shape::int64_type, dims}, s.data()};
            case onnx::TensorProto::STRING: throw std::runtime_error("Unsupported tensor type: string");
            case onnx::TensorProto::BOOL: return literal{{shape::int32_type, dims}, s.data()};
            case onnx::TensorProto::FLOAT16: return literal{{shape::half_type, dims}, s.data()};
            case onnx::TensorProto::DOUBLE: return literal{{shape::double_type, dims}, s.data()};
            case onnx::TensorProto::UINT32: throw std::runtime_error("Unsupported tensor type: uint32");
            case onnx::TensorProto::UINT64: throw std::runtime_error("Unsupported tensor type: uint64");
            case onnx::TensorProto::COMPLEX64: throw std::runtime_error("Unsupported tensor type: complex64");
            case onnx::TensorProto::COMPLEX128: throw std::runtime_error("Unsupported tensor type: complex128");
            }
            MIGRAPHX_THROW("Invalid tensor type");
        }
        switch(t.data_type())
        {
        case onnx::TensorProto::UNDEFINED: throw std::runtime_error("Unsupported tensor type: undefined");
        case onnx::TensorProto::FLOAT:
            return literal{{shape::float_type, dims}, t.float_data().begin(), t.float_data().end()};
        case onnx::TensorProto::UINT8: throw std::runtime_error("Unsupported tensor type: uint8");
        case onnx::TensorProto::INT8:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::UINT16:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT16:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT32:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::INT64:
            return literal{{shape::int64_type, dims}, t.int64_data().begin(), t.int64_data().end()};
        case onnx::TensorProto::STRING: throw std::runtime_error("Unsupported tensor type: string");
        case onnx::TensorProto::BOOL:
            return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
        case onnx::TensorProto::FLOAT16:
            return literal{{shape::half_type, dims}, t.float_data().begin(), t.float_data().end()};
        case onnx::TensorProto::DOUBLE:
            return literal{
                {shape::double_type, dims}, t.double_data().begin(), t.double_data().end()};
        case onnx::TensorProto::UINT32: throw std::runtime_error("Unsupported tensor type: uint32");
        case onnx::TensorProto::UINT64: throw std::runtime_error("Unsupported tensor type: uint64");
        case onnx::TensorProto::COMPLEX64: throw std::runtime_error("Unsupported tensor type: complex64");
        case onnx::TensorProto::COMPLEX128: throw std::runtime_error("Unsupported tensor type: complex128");
        }
        MIGRAPHX_THROW("Invalid tensor type");
    }

    static shape parse_type(const onnx::TypeProto& t)
    {
        shape::type_t shape_type{};
        switch(t.tensor_type().elem_type())
        {
        case onnx::TensorProto::UNDEFINED:
            break; // throw std::runtime_error("Unsupported type UNDEFINED");
        case onnx::TensorProto::FLOAT: shape_type = shape::float_type; break;
        case onnx::TensorProto::UINT8:
            break; // throw std::runtime_error("Unsupported type UINT8");
        case onnx::TensorProto::INT8: shape_type = shape::int8_type; break;
        case onnx::TensorProto::UINT16: shape_type = shape::uint16_type; break;
        case onnx::TensorProto::INT16: shape_type = shape::int16_type; break;
        case onnx::TensorProto::INT32: shape_type = shape::int32_type; break;
        case onnx::TensorProto::INT64: shape_type = shape::int64_type; break;
        case onnx::TensorProto::STRING:
            break; // throw std::runtime_error("Unsupported type STRING");
        case onnx::TensorProto::BOOL:
            break; // throw std::runtime_error("Unsupported type BOOL");
        case onnx::TensorProto::FLOAT16: shape_type = shape::half_type; break;
        case onnx::TensorProto::DOUBLE: shape_type = shape::double_type; break;
        case onnx::TensorProto::UINT32: shape_type = shape::uint32_type; break;
        case onnx::TensorProto::UINT64: shape_type = shape::uint64_type; break;
        case onnx::TensorProto::COMPLEX64:
            break; // throw std::runtime_error("Unsupported type COMPLEX64");
        case onnx::TensorProto::COMPLEX128:
            break; // throw std::runtime_error("Unsupported type COMPLEX128");
        }
        std::vector<std::size_t> dims;
        auto&& tensor_dims = t.tensor_type().shape().dim();
        std::transform(tensor_dims.begin(),
                       tensor_dims.end(),
                       std::back_inserter(dims),
                       [](auto&& d) -> std::size_t {
                           if(not d.has_dim_value())
                           {
                               long default_batch_size = 1; // FIXME
                               return default_batch_size;
                           }
                           return d.dim_value();
                       });
        return {shape_type, dims};
    }
};

program parse_onnx(const std::string& name)
{
    std::fstream input(name.c_str(), std::ios::in | std::ios::binary);
    onnx_parser parser;
#ifndef NDEBUG
    // Log the program when it can't be parsed
    try
    {
        parser.parse_from(input);
    }
    catch(...)
    {
        std::cerr << parser.prog << std::endl;
        throw;
    }
#else
    parser.parse_from(input);
#endif
    return std::move(parser.prog);
}
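
// Example usage (a minimal sketch):
//   migraphx::program p = migraphx::parse_onnx("model.onnx");
//   std::cout << p << std::endl;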

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx