"benchmark/kernels/vscode:/vscode.git/clone" did not exist on "7c3f07dbcba5fb36b889ab219a758663f111e599"
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <iostream>
#include <set>
#include <unordered_set>
#include <vector>
#include <random>
#include <cmath>
#include <migraphx/program.hpp>
#include <migraphx/target.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/module.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/shape.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/functional.hpp>
#include <basic_ops.hpp>
#include <migraphx/compile_options.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/generate.hpp>
#include "migraphx/partitioner.hpp"
#include "migraphx/target_assignments.hpp"
#include "test.hpp"

// check whether the instruction's operator carries a "target" attribute
// (e.g. a custom_op or a run_on_module operator)
bool has_target_attr(const migraphx::instruction& ins)
{
    return ins.get_operator().attributes().contains("target");
}

auto nonprefixed_ops()
{
    // ops whose names are not prefixed with a target namespace after compilation
    static std::unordered_set<std::string> op_map = {
        "select_module", "load", "if", "nonmaxsuppression", "multibroadcast"};
    return op_map;
}

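// The is_compiled_<target>_module checks below verify that every instruction in a
// compiled module is either a builtin (prefixed with "@"), an op in the target's
// namespace, "check_context", one of the non-prefixed ops above, or an op that
// carries a "target" attribute.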
bool is_compiled_gpu_module(const migraphx::module& m)
{
    return std::all_of(m.begin(), m.end(), [](auto ins) {
        auto ins_name = ins.name();
        if(not migraphx::starts_with(ins_name, "@"))
        {
            if(not migraphx::starts_with(ins_name, "gpu::") and
               not migraphx::starts_with(ins_name, "hip::") and
               not migraphx::starts_with(ins_name, "check_context") and
               not migraphx::contains(nonprefixed_ops(), ins_name) and not has_target_attr(ins))
            {
                return false;
            }
        }
        return true;
    });
}

bool is_compiled_fpga_module(const migraphx::module& m)
{
    return std::all_of(m.begin(), m.end(), [](auto ins) {
        auto ins_name = ins.name();
        if(not migraphx::starts_with(ins_name, "@"))
        {
            if(not migraphx::starts_with(ins_name, "fpga::") and
               not migraphx::starts_with(ins_name, "check_context") and
               not migraphx::contains(nonprefixed_ops(), ins_name) and not has_target_attr(ins))
            {
                return false;
            }
        }
        return true;
    });
}

bool is_compiled_cpu_module(const migraphx::module& m)
{
    return std::all_of(m.begin(), m.end(), [](auto ins) {
        auto ins_name = ins.name();
        if(not migraphx::starts_with(ins_name, "@"))
        {
            if(not migraphx::starts_with(ins_name, "cpu::") and
               not migraphx::starts_with(ins_name, "dnnl::") and
               not migraphx::starts_with(ins_name, "check_context") and not has_target_attr(ins) and
               not migraphx::contains(nonprefixed_ops(), ins_name))
            {
                return false;
            }
        }
        return true;
    });
}

bool is_compiled_ref_module(const migraphx::module& m)
{
    return std::all_of(m.begin(), m.end(), [](auto ins) {
        auto ins_name = ins.name();
        if(not migraphx::starts_with(ins_name, "@"))
        {
            if((not migraphx::starts_with(ins_name, "ref::") and
                not migraphx::starts_with(ins_name, "check_context") and
                not has_target_attr(ins)) and
               not migraphx::contains(nonprefixed_ops(), ins_name))
            {
                return false;
            }
        }
        return true;
    });
}

// NOLINT
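// Walks every module in the program; for each "run_on_target" instruction it checks
// that the submodule was compiled for the target selected by its target_id, which is
// an index into the given target list.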
bool check_compiled_program(const migraphx::program& p,
                            const std::vector<migraphx::target>& targets)
{
    auto mods           = p.get_modules();
    bool check_compiled = true;
    for(const auto* mod : mods)
    {
        for(const auto& ins : *mod)
        {
            if(ins.name() == "run_on_target")
            {
                auto* mod_input = ins.module_inputs().front();
                std::size_t target_id =
                    ins.get_operator().to_value()["target_id"].to<std::size_t>();
                auto target_name = targets.at(target_id).name();
                if(target_name == "gpu")
                    check_compiled &= is_compiled_gpu_module(*mod_input);
                else if(target_name == "cpu")
                    check_compiled &= is_compiled_cpu_module(*mod_input);
                else if(target_name == "fpga")
                    check_compiled &= is_compiled_fpga_module(*mod_input);
                else if(target_name == "ref")
                    check_compiled &= is_compiled_ref_module(*mod_input);
            }
        }
    }
    return check_compiled;
}

TEST_CASE(multitarget_compile_cpu_gpu)
{
    migraphx::program p;
    auto* mm     = p.get_main_module();
    auto s       = migraphx::shape{migraphx::shape::float_type, {8}};
    auto x_param = mm->add_parameter("x", s);
    auto y_param = mm->add_parameter("y", s);
    auto z_param = mm->add_parameter("z", s);
    auto cpu_ins = mm->add_instruction(migraphx::make_op("add"), x_param, y_param);
    auto gpu_ins = mm->add_instruction(migraphx::make_op("add"), cpu_ins, z_param);
    mm->add_return({gpu_ins});
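    // Assign each instruction a target id; the ids index into the target list passed
    // to compile() below (0 -> gpu, 1 -> cpu).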
    migraphx::target_assignments tass;
    tass.insert(tass.begin(), std::make_pair(cpu_ins, 1));
    tass.insert(tass.begin(), std::make_pair(gpu_ins, 0));
    migraphx::partition(p, tass);
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    p.compile({migraphx::make_target("gpu"), migraphx::make_target("cpu")}, {gpu_opts});
    EXPECT(check_compiled_program(p, {migraphx::make_target("gpu"), migraphx::make_target("cpu")}));
    migraphx::parameter_map params;
    params["x"] = migraphx::fill_argument(s, 1);
    params["y"] = migraphx::fill_argument(s, 2);
    params["z"] = migraphx::fill_argument(s, 3);
    auto result = p.eval(params).back();
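    // gold: (x + y) + z = (1 + 2) + 3 = 6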
    auto gold   = migraphx::fill_argument(s, 6);
    EXPECT(gold == result);
}

TEST_CASE(single_target_multi_compile)
{
    migraphx::program p;
    migraphx::shape boxes_s{migraphx::shape::float_type, {1, 6, 4}};
    auto* mm         = p.get_main_module();
    auto boxes_param = mm->add_parameter("boxes", boxes_s);

    migraphx::shape scores_s{migraphx::shape::float_type, {1, 1, 6}};
    std::vector<float> scores_vec = {0.9, 0.75, 0.6, 0.95, 0.5, 0.3};
    auto scores_l                 = mm->add_literal(migraphx::literal(scores_s, scores_vec));
    auto max_out_l                = mm->add_literal(int64_t{4});
    auto iou_threshold            = mm->add_literal(0.5f);
    auto score_threshold          = mm->add_literal(0.0f);
    auto r                        = mm->add_instruction(
        migraphx::make_op("nonmaxsuppression",
                          {{"center_point_box", true}, {"use_dyn_output", true}}),
        boxes_param,
        scores_l,
        max_out_l,
        iou_threshold,
        score_threshold);
    mm->add_return({r});
    // do partition
    migraphx::target_assignments tass;
    tass.insert(tass.begin(), std::make_pair(r, 0));
    migraphx::partition(p, tass);
    // compile using multi-target compilation path
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    // need to add "ref" to avoid an ambiguous call to "compile()"
    p.compile({migraphx::make_target("gpu"), migraphx::make_target("ref")}, {gpu_opts});
    EXPECT(check_compiled_program(p, {migraphx::make_target("gpu"), migraphx::make_target("ref")}));
    // eval
    migraphx::parameter_map params;
    std::vector<float> boxes_vec  = {0.5, 0.5,  1.0, 1.0, 0.5, 0.6,  1.0, 1.0, 0.5, 0.4,   1.0, 1.0,
                                    0.5, 10.5, 1.0, 1.0, 0.5, 10.6, 1.0, 1.0, 0.5, 100.5, 1.0, 1.0};
    params["boxes"]               = migraphx::argument(boxes_s, boxes_vec.data());
    auto output                   = p.eval(params).back();
    std::vector<int64_t> gold_vec = {0, 0, 3, 0, 0, 0, 0, 0, 5};
    auto gold =
        migraphx::argument(migraphx::shape{migraphx::shape::int64_type, {3, 3}}, gold_vec.data());
    EXPECT(output == gold);
}

TEST_CASE(multitarget_compile_if_then_else_partition)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape cond_s{migraphx::shape::bool_type};
    auto cond = mm->add_parameter("cond", cond_s);
    migraphx::shape ds{migraphx::shape::float_type, {2, 3}};
    auto x = mm->add_parameter("x", ds);
    auto y = mm->add_parameter("y", ds);

    auto* then_mod = p.create_module("if_gpu_mod");
    std::vector<float> data1(ds.elements(), 1);
    auto l1 = then_mod->add_literal(migraphx::literal(ds, data1));
    // auto gpu_x = then_mod->add_parameter("gpu_x", ds);
    auto a1 = then_mod->add_instruction(migraphx::make_op("add"), x, l1);
    then_mod->add_return({a1});

    auto* else_mod = p.create_module("else_cpu_mod");
    std::vector<float> data2(ds.elements(), 2);
    auto l2 = else_mod->add_literal(migraphx::literal(ds, data2));
    // auto cpu_y = else_mod->add_parameter("cpu_y", ds);
    auto a2 = else_mod->add_instruction(migraphx::make_op("mul"), y, l2);
    else_mod->add_return({a2});

    // auto* run_on_cpu_mod = p.create_module("run_on_cpu");
    // auto run_cpu_ins     = run_on_cpu_mod->add_instruction(
    //     migraphx::make_op("run_on_target", {{"target_id", 1}}), {y}, {else_mod});
    // auto run_cpu_ins_0 = run_on_cpu_mod->add_instruction(
    //     migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_cpu_ins);
    // run_on_cpu_mod->add_return({run_cpu_ins_0});

    // auto* run_on_gpu_mod = p.create_module("run_on_gpu");
    // auto run_gpu_ins     = run_on_gpu_mod->add_instruction(
    //     migraphx::make_op("run_on_target", {{"target_id", 0}}), {x}, {then_mod});
    // auto run_gpu_ins_0 = run_on_gpu_mod->add_instruction(
    //     migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_gpu_ins);
    // run_on_gpu_mod->add_return({run_gpu_ins_0});

    auto ret = mm->add_instruction(migraphx::make_op("if"), {cond}, {then_mod, else_mod});
    auto r   = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), ret);
    mm->add_return({r});
    p.debug_print();
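    // Assign the then-branch instructions (l1, a1) to target 0 (gpu) and the
    // else-branch instructions (l2, a2) to target 1 (cpu); partition() wraps each
    // group into a run_on_target module for the corresponding target.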
    migraphx::target_assignments tass;
    tass.insert(tass.begin(), std::make_pair(l1, 0));
    tass.insert(tass.begin(), std::make_pair(a1, 0));
    tass.insert(tass.begin(), std::make_pair(l2, 1));
    tass.insert(tass.begin(), std::make_pair(a2, 1));
    migraphx::partition(p, tass);
    p.debug_print();
    // compile
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    p.compile({migraphx::make_target("gpu"), migraphx::make_target("cpu")}, {gpu_opts});
    EXPECT(check_compiled_program(p, {migraphx::make_target("gpu"), migraphx::make_target("cpu")}));
    migraphx::parameter_map params;
    params["x"] = migraphx::fill_argument(ds, 2);
    params["y"] = migraphx::fill_argument(ds, 3);
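    // then branch: x + 1 = 3; else branch: y * 2 = 6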
    for(bool cond_val : {true, false})
    {
        params["cond"] = migraphx::argument(cond_s, &cond_val);
        auto result    = p.eval(params).back();
        auto gold      = migraphx::fill_argument(ds, (cond_val ? 3 : 6));
        EXPECT(gold == result);
    }
}

TEST_CASE(multitarget_compile_if_then_else)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape cond_s{migraphx::shape::bool_type};
    auto cond = mm->add_parameter("cond", cond_s);
    migraphx::shape ds{migraphx::shape::float_type, {2, 3}};
    auto x = mm->add_parameter("x", ds);
    auto y = mm->add_parameter("y", ds);

    auto* then_mod = p.create_module("if_gpu_mod");
    std::vector<float> data1(ds.elements(), 1);
    auto l1    = then_mod->add_literal(migraphx::literal(ds, data1));
    auto gpu_x = then_mod->add_parameter("gpu_x", ds);
    auto a1    = then_mod->add_instruction(migraphx::make_op("add"), gpu_x, l1);
    then_mod->add_return({a1});

    auto* else_mod = p.create_module("else_cpu_mod");
    std::vector<float> data2(ds.elements(), 2);
    auto l2    = else_mod->add_literal(migraphx::literal(ds, data2));
    auto cpu_y = else_mod->add_parameter("cpu_y", ds);
    auto a2    = else_mod->add_instruction(migraphx::make_op("mul"), cpu_y, l2);
    else_mod->add_return({a2});

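    // Wrap each branch in a module whose only work is a run_on_target call, so the
    // "if" dispatches the then-branch to target 0 (gpu) and the else-branch to
    // target 1 (cpu).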
    auto* run_on_cpu_mod = p.create_module("run_on_cpu");
    auto run_cpu_ins     = run_on_cpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 1}}), {y}, {else_mod});
    auto run_cpu_ins_0 = run_on_cpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_cpu_ins);
    run_on_cpu_mod->add_return({run_cpu_ins_0});

    auto* run_on_gpu_mod = p.create_module("run_on_gpu");
    auto run_gpu_ins     = run_on_gpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 0}}), {x}, {then_mod});
    auto run_gpu_ins_0 = run_on_gpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_gpu_ins);
    run_on_gpu_mod->add_return({run_gpu_ins_0});

    auto ret =
        mm->add_instruction(migraphx::make_op("if"), {cond}, {run_on_gpu_mod, run_on_cpu_mod});
    auto r = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), ret);
    mm->add_return({r});
    // compile
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    p.compile({migraphx::make_target("gpu"), migraphx::make_target("cpu")}, {gpu_opts});
    EXPECT(check_compiled_program(p, {migraphx::make_target("gpu"), migraphx::make_target("cpu")}));
    migraphx::parameter_map params;
    params["x"] = migraphx::fill_argument(ds, 2);
    params["y"] = migraphx::fill_argument(ds, 3);
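    // then branch: x + 1 = 3; else branch: y * 2 = 6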
    for(bool cond_val : {true, false})
    {
        params["cond"] = migraphx::argument(cond_s, &cond_val);
        auto result    = p.eval(params).back();
        auto gold      = migraphx::fill_argument(ds, (cond_val ? 3 : 6));
        EXPECT(gold == result);
    }
}

// TODO: FPGA compilation is broken right now; the test below mentions fpga but doesn't compile for it
TEST_CASE(multitarget_compile_nested_if_then_else)
{
    std::unordered_map<std::size_t, std::size_t> counter_map = {{0, 0}, {1, 0}};
    migraphx::shape ds{migraphx::shape::float_type, {2, 3}};
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape cond_s{migraphx::shape::bool_type};
    auto cond_0             = mm->add_parameter("cond_0", cond_s);
    auto cond_1             = mm->add_parameter("cond_1", cond_s);
    auto x                  = mm->add_parameter("x", ds);
    auto y                  = mm->add_parameter("y", ds);
    auto z                  = mm->add_parameter("z", ds);
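    // Helper: builds a module that computes ((p0 - 1) * p1) - p2 from its three
    // parameters and wraps it in a "run_on_target" module pinned to target id tid.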
    auto create_test_module = [&](migraphx::program& prog,
                                  const std::vector<migraphx::instruction_ref>& inputs,
                                  std::size_t tid) {
        std::string mod_name =
            "target_" + std::to_string(tid) + "_" + std::to_string(counter_map[tid]++);
        auto* test_mod = prog.create_module(mod_name);
        std::vector<float> data(ds.elements(), -1);
        auto l1               = test_mod->add_literal(migraphx::literal(ds, data));
        auto test_mod_param_0 = test_mod->add_parameter(mod_name + "_param_0", ds);
        auto test_mod_param_1 = test_mod->add_parameter(mod_name + "_param_1", ds);
        auto test_mod_param_2 = test_mod->add_parameter(mod_name + "_param_2", ds);
        auto ins1 = test_mod->add_instruction(migraphx::make_op("add"), test_mod_param_0, l1);
        auto ins2 = test_mod->add_instruction(migraphx::make_op("mul"), ins1, test_mod_param_1);
        auto ins3 = test_mod->add_instruction(migraphx::make_op("sub"), ins2, test_mod_param_2);
        test_mod->add_return({ins3});
        auto* run_on_target_mod = prog.create_module("run_on_" + mod_name);
        auto run_ins            = run_on_target_mod->add_instruction(
            migraphx::make_op("run_on_target", {{"target_id", tid}}), inputs, {test_mod});
        auto run_ins_0 = run_on_target_mod->add_instruction(
            migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_ins);
        run_on_target_mod->add_return({run_ins_0});
        return run_on_target_mod;
    };

    // create a nested module that uses multiple targets:
    // then_mod has one instruction that runs a module on "ref" and another that creates
    // nested "if" modules which run on "cpu" and "gpu"
    auto* ref_mod = p.create_module("ref_mod");
    auto ref_x    = ref_mod->add_parameter("ref_x", ds);
    auto ref_y    = ref_mod->add_parameter("ref_y", ds);
    auto ref_add  = ref_mod->add_instruction(migraphx::make_op("add"), ref_x, ref_y);
    ref_mod->add_return({ref_add});

    auto* then_mod        = p.create_module("then_mod");
    auto then_mod_cond    = then_mod->add_parameter("then_mod_cond", cond_s);
    auto then_mod_param_0 = then_mod->add_parameter("then_mod_param_0", ds);
    auto then_mod_param_1 = then_mod->add_parameter("then_mod_param_1", ds);
    auto then_mod_param_2 = then_mod->add_parameter("then_mod_param_2", ds);
    auto then_mod_ref_ins =
        then_mod->add_instruction(migraphx::make_op("run_on_target", {{"target_id", 3}}),
                                  {then_mod_param_0, then_mod_param_1},
                                  {ref_mod});
    auto then_mod_ref_ins_0 = then_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), then_mod_ref_ins);
    auto then_mod_if = then_mod->add_instruction(
        migraphx::make_op("if"),
        {then_mod_cond,
         then_mod_param_0,
         then_mod_param_1,
         then_mod_param_2,
         then_mod_ref_ins_0,
         then_mod_param_1,
         then_mod_param_2},
        {create_test_module(p, {then_mod_param_0, then_mod_param_1, then_mod_param_2}, 1),
         create_test_module(p, {then_mod_ref_ins_0, then_mod_param_1, then_mod_param_2}, 0)});
    auto then_mod_if_0 =
        then_mod->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), then_mod_if);
    then_mod->add_return({then_mod_if_0});

    // create a nested else_mod that uses multiple targets:
    // else_mod has one instruction that runs a module on "fpga" and another that creates
    // nested "if" modules which run on "cpu" and "gpu"
    auto* fpga_mod = p.create_module("fpga_mod");
    auto fpga_x    = fpga_mod->add_parameter("fpga_x", ds);
    auto fpga_y    = fpga_mod->add_parameter("fpga_y", ds);
    auto fpga_add  = fpga_mod->add_instruction(migraphx::make_op("add"), fpga_x, fpga_y);
    fpga_mod->add_return({fpga_add});

    auto* else_mod        = p.create_module("else_mod");
    auto else_mod_cond    = else_mod->add_parameter("else_mod_cond", cond_s);
    auto else_mod_param_0 = else_mod->add_parameter("else_mod_param_0", ds);
    auto else_mod_param_1 = else_mod->add_parameter("else_mod_param_1", ds);
    auto else_mod_param_2 = else_mod->add_parameter("else_mod_param_2", ds);
    auto else_mod_fpga_ins =
        else_mod->add_instruction(migraphx::make_op("run_on_target", {{"target_id", 2}}),
                                  {else_mod_param_0, else_mod_param_2},
                                  {fpga_mod});
    auto else_mod_fpga_ins_0 = else_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), else_mod_fpga_ins);

    auto else_mod_if = else_mod->add_instruction(
        migraphx::make_op("if"),
        {else_mod_cond,
         else_mod_fpga_ins_0,
         else_mod_param_0,
         else_mod_param_1,
         else_mod_param_2,
         else_mod_param_1,
         else_mod_param_0},
        {create_test_module(p, {else_mod_fpga_ins_0, else_mod_param_0, else_mod_param_1}, 0),
         create_test_module(p, {else_mod_param_2, else_mod_param_1, else_mod_param_0}, 1)});
    auto else_mod_if_0 =
        else_mod->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), else_mod_if);
    else_mod->add_return({else_mod_if_0});

    // Create nested and multi-target main module using "If"
    auto main_if_ins = mm->add_instruction(
        migraphx::make_op("if"), {cond_0, cond_1, x, y, z, cond_1, x, y, z}, {then_mod, else_mod});
    auto r = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), main_if_ins);
    mm->add_return({r});

    // compile
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;

    p.compile({migraphx::make_target("gpu"),
               migraphx::make_target("cpu"),
               migraphx::make_target("ref"),
               migraphx::make_target("ref")},
              {gpu_opts});
    EXPECT(check_compiled_program(p,
                                  {migraphx::make_target("gpu"),
                                   migraphx::make_target("cpu"),
                                   migraphx::make_target("ref"),
                                   migraphx::make_target("ref")}));
    // do evaluation using different conditions
    migraphx::parameter_map params;
    float x_i   = 2.0;
    float y_i   = 3.0;
    float z_i   = 4.0;
    params["x"] = migraphx::fill_argument(ds, x_i);
    params["y"] = migraphx::fill_argument(ds, y_i);
    params["z"] = migraphx::fill_argument(ds, z_i);
    // cover all paths with different combinations of conditions
    std::vector<std::pair<bool, bool>> test_conds = {
        {true, true}, {true, false}, {false, true}, {false, false}};
    for(auto [cond_val_0, cond_val_1] : test_conds)
    {
        params["cond_0"] = migraphx::argument(cond_s, &cond_val_0);
        params["cond_1"] = migraphx::argument(cond_s, &cond_val_1);
        auto result      = p.eval(params).back();
        // main dispatches through a single if_then_else ("if") instruction
        // then mod computes: {tmp = x+y; (cond_val_1) ? (((x-1)*y)-z) : (((tmp-1)*y)-z);}
        // else mod computes: {tmp = x+z; (cond_val_1) ? (((tmp-1)*x)-y) : (((z-1)*y)-x);}
        float gold_i = -1.0;
        if(cond_val_0)
        {
            float tmp_i = x_i + y_i;
            gold_i      = (cond_val_1) ? (((x_i - 1) * y_i) - z_i) : (((tmp_i - 1) * y_i) - z_i);
        }
        else
        {
            float tmp_i = x_i + z_i;
            gold_i      = (cond_val_1) ? (((tmp_i - 1) * x_i) - y_i) : (((z_i - 1) * y_i) - x_i);
        }
        auto gold = migraphx::fill_argument(ds, gold_i);
        EXPECT(gold == result);
    }
}

// TODO: FPGA compilation is broken right now; the test below mentions fpga but doesn't compile for it
TEST_CASE(multitarget_select_module)
{
    migraphx::program p;
    // create batch submodules
    auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
        auto* submod = p.create_module(module_name);
        migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
        auto sm_input = submod->add_parameter("data", sm_shape);
        migraphx::shape lit_s{migraphx::shape::float_type, {1}};
        auto literal_ins = submod->add_literal(migraphx::literal{lit_s, {6}});
        auto broadcast_lit =
            submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
        auto add_ins0 = submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
        auto add_ins1 = submod->add_instruction(migraphx::make_op("add"), add_ins0, broadcast_lit);
        submod->add_return({add_ins1});
        return submod;
    };
    auto* batch1 = create_submodule(1, "batch_1");
    auto* batch2 = create_submodule(2, "batch_2");
    auto* batch3 = create_submodule(3, "batch_3");
    auto* batch4 = create_submodule(4, "batch_4");

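    // Wrap each batch-specific submodule in a run_on_target module; the target ids
    // index the target list passed to compile() below (0 = gpu, 1 = cpu, 2 and 3 =
    // ref, with ref standing in for fpga per the TODO above).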
    auto* run_cpu_mod = p.create_module("cpu_mod");
    auto cpu_param =
        run_cpu_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {1, 4}});
    auto run_cpu_ins = run_cpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 1}}), {cpu_param}, {batch1});
    auto run_cpu_ins_0 = run_cpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_cpu_ins);
    run_cpu_mod->add_return({run_cpu_ins_0});

    auto* run_gpu_mod = p.create_module("gpu_mod");
    auto gpu_param =
        run_gpu_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 4}});
    auto run_gpu_ins = run_gpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 0}}), {gpu_param}, {batch2});
    auto run_gpu_ins_0 = run_gpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_gpu_ins);
    run_gpu_mod->add_return({run_gpu_ins_0});

    auto* run_fpga_mod = p.create_module("fpga_mod");
    auto fpga_param =
        run_fpga_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 4}});
    auto run_fpga_ins = run_fpga_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 2}}), {fpga_param}, {batch3});
    auto run_fpga_ins_0 = run_fpga_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_fpga_ins);
    run_fpga_mod->add_return({run_fpga_ins_0});

    auto* run_ref_mod = p.create_module("ref_mod");
    auto ref_param =
        run_ref_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {4, 4}});
    auto run_ref_ins = run_ref_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 3}}), {ref_param}, {batch4});
    auto run_ref_ins_0 = run_ref_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_ref_ins);
    run_ref_mod->add_return({run_ref_ins_0});

    auto* mm = p.get_main_module();
    migraphx::shape dyn_s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
    auto input                              = mm->add_parameter("data", dyn_s);
    std::vector<migraphx::shape> sub_shapes = {};
    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
    migraphx::shape out_attr = migraphx::shape{sub_shapes};
    auto sm_ins              = mm->add_instruction(
        migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
        {input},
        {run_cpu_mod, run_gpu_mod, run_fpga_mod, run_ref_mod});
    auto ret0 = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
    mm->add_return({ret0});
    // compile
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    p.compile({migraphx::make_target("gpu"),
               migraphx::make_target("cpu"),
               migraphx::make_target("ref"),
               migraphx::make_target("ref")},
              {gpu_opts});
    EXPECT(check_compiled_program(p,
                                  {migraphx::make_target("gpu"),
                                   migraphx::make_target("cpu"),
                                   migraphx::make_target("ref"),
                                   migraphx::make_target("ref")}));
    // the program computes x + 12, where x has dynamic shape {{1, 4}, {4, 4}}
    for(const size_t bs : {1, 2, 3, 4})
    {
        migraphx::shape arg_shape{migraphx::shape::float_type, {bs, 4}};
        migraphx::parameter_map params;
        params["data"] = migraphx::generate_argument(arg_shape, arg_shape.elements());
        std::vector<float> input_data;
        params["data"].visit([&](const auto& vec) { input_data.assign(vec.begin(), vec.end()); });
        std::transform(input_data.begin(), input_data.end(), input_data.begin(), [](const auto& i) {
            return i + 12.0;
        });
        auto result = p.eval(params).back();
        EXPECT(migraphx::argument(arg_shape, input_data.data()) == result);
    }
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }