/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <iostream>
#include <set>
#include <unordered_set>
#include <vector>
#include <random>
#include <cmath>
#include <migraphx/program.hpp>
#include <migraphx/target.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/module.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/shape.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/functional.hpp>
#include <basic_ops.hpp>
#include <migraphx/compile_options.hpp>
#include <migraphx/register_target.hpp>
#include "test.hpp"

// Detects operators pinned to a specific target (e.g. custom_op or
// run_on_module-style operators) by checking for a "target" attribute.
bool has_target_attr(const migraphx::instruction& ins)
{
    const auto attrs = ins.get_operator().attributes();
    return attrs.contains("target");
}

// Target-agnostic ops that keep their plain names after compilation (no
// "gpu::"/"cpu::"/... prefix). Returned by const reference so the callers'
// per-instruction lookups do not copy the set on every call.
const std::unordered_set<std::string>& nonprefixed_ops()
{
    static const std::unordered_set<std::string> op_map = {
        "select_module", "load", "if", "nonmaxsuppression", "multibroadcast"};
    return op_map;
}

// True if every instruction in the module was lowered for the gpu target:
// builtins ("@..."), gpu::/hip:: ops, check_context, target-agnostic ops, or
// ops carrying an explicit target attribute.
bool is_compiled_gpu_module(const migraphx::module& m)
{
    // Bind the instruction by const reference; `auto ins` would copy every
    // instruction visited by all_of.
    return std::all_of(m.begin(), m.end(), [](const auto& ins) {
        const auto ins_name = ins.name();
        if(migraphx::starts_with(ins_name, "@"))
            return true;
        return migraphx::starts_with(ins_name, "gpu::") or
               migraphx::starts_with(ins_name, "hip::") or
               migraphx::starts_with(ins_name, "check_context") or
               migraphx::contains(nonprefixed_ops(), ins_name) or has_target_attr(ins);
    });
}

// True if every instruction in the module was lowered for the fpga target:
// builtins ("@..."), fpga:: ops, check_context, target-agnostic ops, or ops
// carrying an explicit target attribute.
bool is_compiled_fpga_module(const migraphx::module& m)
{
    // Bind the instruction by const reference; `auto ins` would copy every
    // instruction visited by all_of.
    return std::all_of(m.begin(), m.end(), [](const auto& ins) {
        const auto ins_name = ins.name();
        if(migraphx::starts_with(ins_name, "@"))
            return true;
        return migraphx::starts_with(ins_name, "fpga::") or
               migraphx::starts_with(ins_name, "check_context") or
               migraphx::contains(nonprefixed_ops(), ins_name) or has_target_attr(ins);
    });
}

// True if every instruction in the module was lowered for the cpu target:
// builtins ("@..."), cpu::/dnnl:: ops, check_context, target-agnostic ops, or
// ops carrying an explicit target attribute.
bool is_compiled_cpu_module(const migraphx::module& m)
{
    // Bind the instruction by const reference; `auto ins` would copy every
    // instruction visited by all_of.
    return std::all_of(m.begin(), m.end(), [](const auto& ins) {
        const auto ins_name = ins.name();
        if(migraphx::starts_with(ins_name, "@"))
            return true;
        return migraphx::starts_with(ins_name, "cpu::") or
               migraphx::starts_with(ins_name, "dnnl::") or
               migraphx::starts_with(ins_name, "check_context") or
               migraphx::contains(nonprefixed_ops(), ins_name) or has_target_attr(ins);
    });
}

// True if every instruction in the module was lowered for the ref target:
// builtins ("@..."), ref:: ops, check_context, target-agnostic ops, or ops
// carrying an explicit target attribute. Condition structure matches the
// sibling is_compiled_*_module checks (the original grouped its negations
// differently, which was logically equivalent but inconsistent).
bool is_compiled_ref_module(const migraphx::module& m)
{
    // Bind the instruction by const reference; `auto ins` would copy every
    // instruction visited by all_of.
    return std::all_of(m.begin(), m.end(), [](const auto& ins) {
        const auto ins_name = ins.name();
        if(migraphx::starts_with(ins_name, "@"))
            return true;
        return migraphx::starts_with(ins_name, "ref::") or
               migraphx::starts_with(ins_name, "check_context") or
               migraphx::contains(nonprefixed_ops(), ins_name) or has_target_attr(ins);
    });
}

// NOLINT
// Walks every module of the program looking for "run_on_target" instructions
// and verifies that each referenced submodule was compiled for the target
// named by its target_id (an index into `targets`). Returns true only when
// all such submodules pass their target-specific check.
bool check_compiled_program(const migraphx::program& p,
                            const std::vector<migraphx::target>& targets)
{
    bool all_ok = true;
    for(const auto* mod : p.get_modules())
    {
        for(const auto& ins : *mod)
        {
            if(ins.name() != "run_on_target")
                continue;
            const auto* submod = ins.module_inputs().front();
            std::size_t tid    = ins.get_operator().to_value()["target_id"].to<std::size_t>();
            auto target_name   = targets.at(tid).name();
            if(target_name == "gpu")
                all_ok &= is_compiled_gpu_module(*submod);
            else if(target_name == "cpu")
                all_ok &= is_compiled_cpu_module(*submod);
            else if(target_name == "fpga")
                all_ok &= is_compiled_fpga_module(*submod);
            else if(target_name == "ref")
                all_ok &= is_compiled_ref_module(*submod);
        }
    }
    return all_ok;
}

TEST_CASE(multitarget_compile_cpu_gpu)
{
    // A cpu submodule feeds a gpu submodule; compile with both targets and
    // verify each partition was lowered for its own target.
    // (This span previously contained scraped blame-view residue lines that
    // broke compilation; the code below is the cleaned test.)
    migraphx::program p;
    auto* mm      = p.get_main_module();
    auto* cpu_mod = p.create_module("cpu_mod");
    auto s        = migraphx::shape{migraphx::shape::float_type, {8}};
    auto x_cpu    = cpu_mod->add_parameter("cpu_x", s);
    auto y_cpu    = cpu_mod->add_parameter("cpu_y", s);
    auto cpu_add  = cpu_mod->add_instruction(migraphx::make_op("add"), x_cpu, y_cpu);
    cpu_mod->add_return({cpu_add});

    auto* gpu_mod = p.create_module("gpu_mod");
    auto x_gpu    = gpu_mod->add_parameter("gpu_x", s);
    auto y_gpu    = gpu_mod->add_parameter("gpu_y", s);
    auto gpu_add  = gpu_mod->add_instruction(migraphx::make_op("add"), x_gpu, y_gpu);
    gpu_mod->add_return({gpu_add});

    auto x_param = mm->add_parameter("x", s);
    auto y_param = mm->add_parameter("y", s);
    auto z_param = mm->add_parameter("z", s);
    // target_id 1 -> "cpu" in the target list passed to compile() below
    auto cpu_ins = mm->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 1}}), {x_param, y_param}, {cpu_mod});
    auto cpu_ins_0 =
        mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), cpu_ins);
    // target_id 0 -> "gpu"; consumes the cpu submodule's result
    auto gpu_ins = mm->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 0}}), {cpu_ins_0, z_param}, {gpu_mod});
    auto gpu_ins_0 =
        mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), gpu_ins);

    mm->add_return({gpu_ins_0});
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    p.compile({migraphx::make_target("gpu"), migraphx::make_target("cpu")}, {gpu_opts});
    EXPECT(check_compiled_program(p, {migraphx::make_target("gpu"), migraphx::make_target("cpu")}));

    // (1 + 2) + 3 == 6 for every element
    migraphx::parameter_map params;
    std::vector<float> x_data(s.elements(), 1);
    std::vector<float> y_data(s.elements(), 2);
    std::vector<float> z_data(s.elements(), 3);
    params["x"] = migraphx::argument(s, x_data.data());
    params["y"] = migraphx::argument(s, y_data.data());
    params["z"] = migraphx::argument(s, z_data.data());
    auto result = p.eval(params).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold(s.elements(), 6);
    EXPECT(migraphx::verify_range(gold, result_vector));
}

umangyadav's avatar
umangyadav committed
209
TEST_CASE(single_target_multi_compile)
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
{
    migraphx::program p;
    auto* mm = p.get_main_module();

    migraphx::shape boxes_s{migraphx::shape::float_type, {1, 6, 4}};

    migraphx::shape scores_s{migraphx::shape::float_type, {1, 1, 6}};
    std::vector<float> scores_vec = {0.9, 0.75, 0.6, 0.95, 0.5, 0.3};

    auto boxes_l         = mm->add_parameter("boxes", boxes_s);
    auto scores_l        = mm->add_literal(migraphx::literal(scores_s, scores_vec));
    auto max_out_l       = mm->add_literal(int64_t{4});
    auto iou_threshold   = mm->add_literal(0.5f);
    auto score_threshold = mm->add_literal(0.0f);

    auto r = mm->add_instruction(migraphx::make_op("nonmaxsuppression", {{"center_point_box", 1}}),
                                 boxes_l,
                                 scores_l,
                                 max_out_l,
                                 iou_threshold,
                                 score_threshold);
    mm->add_return({r});
umangyadav's avatar
umangyadav committed
232
233
234
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    p.compile({migraphx::make_target("gpu")}, {gpu_opts});
235
236
237
238
239
240
241
242
243
244
245
246
247
    EXPECT(is_compiled_gpu_module(*p.get_main_module()));
}

TEST_CASE(multitarget_compile_if_then_else)
{
    // An "if" whose then-branch runs on the gpu and whose else-branch runs on
    // the cpu; each branch wraps its computation in a run_on_target module.
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape cond_s{migraphx::shape::bool_type};
    auto cond = mm->add_parameter("cond", cond_s);
    migraphx::shape ds{migraphx::shape::float_type, {2, 3}};
    auto x = mm->add_parameter("x", ds);
    auto y = mm->add_parameter("y", ds);

    // then-branch payload: gpu_x + 1
    auto* then_mod = p.create_module("if_gpu_mod");
    std::vector<float> data1(ds.elements(), 1);
    auto l1    = then_mod->add_literal(migraphx::literal(ds, data1));
    auto gpu_x = then_mod->add_parameter("gpu_x", ds);
    auto a1    = then_mod->add_instruction(migraphx::make_op("add"), gpu_x, l1);
    then_mod->add_return({a1});

    // else-branch payload: cpu_y * 2
    auto* else_mod = p.create_module("else_cpu_mod");
    std::vector<float> data2(ds.elements(), 2);
    auto l2    = else_mod->add_literal(migraphx::literal(ds, data2));
    auto cpu_y = else_mod->add_parameter("cpu_y", ds);
    auto a2    = else_mod->add_instruction(migraphx::make_op("mul"), cpu_y, l2);
    else_mod->add_return({a2});

    auto* run_on_cpu_mod = p.create_module("run_on_cpu");
    auto run_cpu_ins     = run_on_cpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 1}}), {y}, {else_mod});
    auto run_cpu_ins_0 = run_on_cpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_cpu_ins);
    run_on_cpu_mod->add_return({run_cpu_ins_0});

    auto* run_on_gpu_mod = p.create_module("run_on_gpu");
    auto run_gpu_ins     = run_on_gpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 0}}), {x}, {then_mod});
    auto run_gpu_ins_0 = run_on_gpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_gpu_ins);
    run_on_gpu_mod->add_return({run_gpu_ins_0});

    auto ret =
        mm->add_instruction(migraphx::make_op("if"), {cond}, {run_on_gpu_mod, run_on_cpu_mod});
    auto r = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), ret);
    mm->add_return({r});
    // compile
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    p.compile({migraphx::make_target("gpu"), migraphx::make_target("cpu")}, {gpu_opts});
    EXPECT(check_compiled_program(p, {migraphx::make_target("gpu"), migraphx::make_target("cpu")}));

    // then: x + 1 = 2 + 1 = 3; else: y * 2 = 3 * 2 = 6
    migraphx::parameter_map params;
    std::vector<float> x_data(ds.elements(), 2);
    std::vector<float> y_data(ds.elements(), 3);
    params["x"] = migraphx::argument(ds, x_data.data());
    params["y"] = migraphx::argument(ds, y_data.data());
    for(bool cond_val : {true, false})
    {
        params["cond"] = migraphx::argument(cond_s, &cond_val);
        auto result    = p.eval(params).back();
        std::vector<float> result_vector;
        result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold(ds.elements(), (cond_val ? 3 : 6));
        EXPECT(migraphx::verify_range(gold, result_vector));
    }
}

umangyadav's avatar
umangyadav committed
301
// TODO : FPGA compilation is broken right now, below test mentions fpga but doesn't compile for it
302
303
304
305
306
307
308
TEST_CASE(multitarget_compile_nested_if_then_else)
{
    std::unordered_map<std::size_t, std::size_t> counter_map = {{0, 0}, {1, 0}};
    migraphx::shape ds{migraphx::shape::float_type, {2, 3}};
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape cond_s{migraphx::shape::bool_type};
umangyadav's avatar
umangyadav committed
309
310
    auto cond_0             = mm->add_parameter("cond_0", cond_s);
    auto cond_1             = mm->add_parameter("cond_1", cond_s);
311
312
313
314
315
316
317
318
    auto x                  = mm->add_parameter("x", ds);
    auto y                  = mm->add_parameter("y", ds);
    auto z                  = mm->add_parameter("z", ds);
    auto create_test_module = [&](migraphx::program& prog,
                                  const std::vector<migraphx::instruction_ref>& inputs,
                                  std::size_t tid) {
        std::string mod_name =
            "target_" + std::to_string(tid) + "_" + std::to_string(counter_map[tid]++);
umangyadav's avatar
umangyadav committed
319
320
321
322
323
324
325
326
327
        auto* test_mod = prog.create_module(mod_name);
        std::vector<float> data(ds.elements(), -1);
        auto l1               = test_mod->add_literal(migraphx::literal(ds, data));
        auto test_mod_param_0 = test_mod->add_parameter(mod_name + "_param_0", ds);
        auto test_mod_param_1 = test_mod->add_parameter(mod_name + "_param_1", ds);
        auto test_mod_param_2 = test_mod->add_parameter(mod_name + "_param_2", ds);
        auto ins1 = test_mod->add_instruction(migraphx::make_op("add"), test_mod_param_0, l1);
        auto ins2 = test_mod->add_instruction(migraphx::make_op("mul"), ins1, test_mod_param_1);
        auto ins3 = test_mod->add_instruction(migraphx::make_op("sub"), ins2, test_mod_param_2);
328
329
        test_mod->add_return({ins3});
        auto* run_on_target_mod = prog.create_module("run_on_" + mod_name);
umangyadav's avatar
umangyadav committed
330
331
332
333
334
        auto run_ins            = run_on_target_mod->add_instruction(
            migraphx::make_op("run_on_target", {{"target_id", tid}}), inputs, {test_mod});
        auto run_ins_0 = run_on_target_mod->add_instruction(
            migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_ins);
        run_on_target_mod->add_return({run_ins_0});
335
336
337
338
339
340
341
342
343
344
345
346
347
        return run_on_target_mod;
    };

    // create nested module with multiple targets.
    // then_mod has one instruction that runs a module on "ref" and another instruction that
    // creates nested modules using "If" that runs on "cpu" and "gpu"
    auto* ref_mod = p.create_module("ref_mod");
    auto ref_x    = ref_mod->add_parameter("ref_x", ds);
    auto ref_y    = ref_mod->add_parameter("ref_y", ds);
    auto ref_add  = ref_mod->add_instruction(migraphx::make_op("add"), ref_x, ref_y);
    ref_mod->add_return({ref_add});

    auto* then_mod        = p.create_module("then_mod");
umangyadav's avatar
umangyadav committed
348
349
350
351
352
353
354
355
    auto then_mod_cond    = then_mod->add_parameter("then_mod_cond", cond_s);
    auto then_mod_param_0 = then_mod->add_parameter("then_mod_param_0", ds);
    auto then_mod_param_1 = then_mod->add_parameter("then_mod_param_1", ds);
    auto then_mod_param_2 = then_mod->add_parameter("then_mod_param_2", ds);
    auto then_mod_ref_ins =
        then_mod->add_instruction(migraphx::make_op("run_on_target", {{"target_id", 3}}),
                                  {then_mod_param_0, then_mod_param_1},
                                  {ref_mod});
356
357
    auto then_mod_ref_ins_0 = then_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), then_mod_ref_ins);
umangyadav's avatar
umangyadav committed
358
    auto then_mod_if = then_mod->add_instruction(
359
        migraphx::make_op("if"),
umangyadav's avatar
umangyadav committed
360
361
362
363
364
365
366
367
368
369
370
371
        {then_mod_cond,
         then_mod_param_0,
         then_mod_param_1,
         then_mod_param_2,
         then_mod_ref_ins_0,
         then_mod_param_1,
         then_mod_param_2},
        {create_test_module(p, {then_mod_param_0, then_mod_param_1, then_mod_param_2}, 1),
         create_test_module(p, {then_mod_ref_ins_0, then_mod_param_1, then_mod_param_2}, 0)});
    auto then_mod_if_0 =
        then_mod->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), then_mod_if);
    then_mod->add_return({then_mod_if_0});
372
373
374
375
376
377
378
379
380
381

    // create nested else_mod with multiple targets.
    // else_mod has one instruction that runs a module on "fpga" and another instruction that
    // creates nested modules using "If" that runs on "cpu" and "gpu"
    auto* fpga_mod = p.create_module("fpga_mod");
    auto fpga_x    = fpga_mod->add_parameter("fpga_x", ds);
    auto fpga_y    = fpga_mod->add_parameter("fpga_y", ds);
    auto fpga_add  = fpga_mod->add_instruction(migraphx::make_op("add"), fpga_x, fpga_y);
    fpga_mod->add_return({fpga_add});

umangyadav's avatar
umangyadav committed
382
383
384
385
386
387
388
389
390
    auto* else_mod        = p.create_module("else_mod");
    auto else_mod_cond    = else_mod->add_parameter("else_mod_cond", cond_s);
    auto else_mod_param_0 = else_mod->add_parameter("else_mod_param_0", ds);
    auto else_mod_param_1 = else_mod->add_parameter("else_mod_param_1", ds);
    auto else_mod_param_2 = else_mod->add_parameter("else_mod_param_2", ds);
    auto else_mod_fpga_ins =
        else_mod->add_instruction(migraphx::make_op("run_on_target", {{"target_id", 2}}),
                                  {else_mod_param_0, else_mod_param_2},
                                  {fpga_mod});
391
392
393
    auto else_mod_fpga_ins_0 = else_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), else_mod_fpga_ins);

umangyadav's avatar
umangyadav committed
394
395
396
397
398
399
400
401
402
403
404
405
406
407
    auto else_mod_if = else_mod->add_instruction(
        migraphx::make_op("if"),
        {else_mod_cond,
         else_mod_fpga_ins_0,
         else_mod_param_0,
         else_mod_param_1,
         else_mod_param_2,
         else_mod_param_1,
         else_mod_param_0},
        {create_test_module(p, {else_mod_fpga_ins_0, else_mod_param_0, else_mod_param_1}, 0),
         create_test_module(p, {else_mod_param_2, else_mod_param_1, else_mod_param_0}, 1)});
    auto else_mod_if_0 =
        else_mod->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), else_mod_if);
    else_mod->add_return({else_mod_if_0});
408
409

    // Create nested and multi-target main module using "If"
umangyadav's avatar
umangyadav committed
410
411
    auto main_if_ins = mm->add_instruction(
        migraphx::make_op("if"), {cond_0, cond_1, x, y, z, cond_1, x, y, z}, {then_mod, else_mod});
412
413
414
415
    auto r = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), main_if_ins);
    mm->add_return({r});

    // compile
umangyadav's avatar
umangyadav committed
416
417
418
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;

419
420
    p.compile({migraphx::make_target("gpu"),
               migraphx::make_target("cpu"),
umangyadav's avatar
umangyadav committed
421
422
423
               migraphx::make_target("ref"),
               migraphx::make_target("ref")},
              {gpu_opts});
424
425
426
    EXPECT(check_compiled_program(p,
                                  {migraphx::make_target("gpu"),
                                   migraphx::make_target("cpu"),
umangyadav's avatar
umangyadav committed
427
                                   migraphx::make_target("ref"),
428
                                   migraphx::make_target("ref")}));
umangyadav's avatar
umangyadav committed
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
    // do evaluation using different conditions
    // TODO: make two conditional to cover all the paths
    migraphx::parameter_map params;
    int x_i = 2, y_i = 3, z_i = 4;
    std::vector<float> x_data(ds.elements(), x_i);
    std::vector<float> y_data(ds.elements(), y_i);
    std::vector<float> z_data(ds.elements(), z_i);
    params["x"] = migraphx::argument(ds, x_data.data());
    params["y"] = migraphx::argument(ds, y_data.data());
    params["z"] = migraphx::argument(ds, z_data.data());
    // cover all paths with different combination of conditions
    std::vector<std::pair<bool, bool>> test_conds = {
        {true, true}, {true, false}, {false, true}, {false, false}};
    for(auto [cond_val_0, cond_val_1] : test_conds)
    {
        params["cond_0"] = migraphx::argument(cond_s, &cond_val_0);
        params["cond_1"] = migraphx::argument(cond_s, &cond_val_1);
        auto result      = p.eval(params).back();
        // main has one instruction that is : if_then_else
        // then mod is doing : {tmp = x+y; (cond) ? (((x-1)*y)-z)  : (((tmp-1)*y)-z);}
        // else mod is doing : {tmp = x+z; (cond) ? (((tmp-1)*x)-y) : (((z-1)*y)-x);}
        int gold_i = -1;
        if(cond_val_0)
        {
            int tmp_i = x_i + y_i;
            gold_i    = (cond_val_1) ? (((x_i - 1) * y_i) - z_i) : (((tmp_i - 1) * y_i) - z_i);
        }
        else
        {
            int tmp_i = x_i + z_i;
            gold_i    = (cond_val_1) ? (((tmp_i - 1) * x_i) - y_i) : (((z_i - 1) * y_i) - x_i);
        }
        std::vector<float> result_vector;
        result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold(ds.elements(), gold_i);
        EXPECT(migraphx::verify_range(gold, result_vector));
    }
466
467
}

umangyadav's avatar
umangyadav committed
468
// TODO : FPGA compilation is broken right now, below test mentions fpga but doesn't compile for it
469
470
471
472
473
474
475
476
TEST_CASE(multitarget_select_module)
{
    migraphx::program p;
    // create batch submodules
    auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
        auto* submod = p.create_module(module_name);
        migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
        auto sm_input = submod->add_parameter("data", sm_shape);
umangyadav's avatar
umangyadav committed
477
478
        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
        auto literal_ins = submod->add_literal(migraphx::literal{lit_s, {6}});
479
480
481
482
        auto broadcast_lit =
            submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
        auto add_ins0 = submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
        auto add_ins1 = submod->add_instruction(migraphx::make_op("add"), add_ins0, broadcast_lit);
umangyadav's avatar
umangyadav committed
483
        submod->add_return({add_ins1});
484
485
486
487
488
489
490
491
        return submod;
    };
    auto* batch1 = create_submodule(1, "batch_1");
    auto* batch2 = create_submodule(2, "batch_2");
    auto* batch3 = create_submodule(3, "batch_3");
    auto* batch4 = create_submodule(4, "batch_4");

    auto* run_cpu_mod = p.create_module("cpu_mod");
umangyadav's avatar
umangyadav committed
492
493
    auto cpu_param =
        run_cpu_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {1, 4}});
494
495
    auto run_cpu_ins = run_cpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 1}}), {cpu_param}, {batch1});
umangyadav's avatar
umangyadav committed
496
497
498
    auto run_cpu_ins_0 = run_cpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_cpu_ins);
    run_cpu_mod->add_return({run_cpu_ins_0});
499
500

    auto* run_gpu_mod = p.create_module("gpu_mod");
umangyadav's avatar
umangyadav committed
501
502
    auto gpu_param =
        run_gpu_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 4}});
503
504
    auto run_gpu_ins = run_gpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 0}}), {gpu_param}, {batch2});
umangyadav's avatar
umangyadav committed
505
506
507
    auto run_gpu_ins_0 = run_gpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_gpu_ins);
    run_gpu_mod->add_return({run_gpu_ins_0});
508
509

    auto* run_fpga_mod = p.create_module("fpga_mod");
umangyadav's avatar
umangyadav committed
510
511
    auto fpga_param =
        run_fpga_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 4}});
512
513
    auto run_fpga_ins = run_fpga_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 2}}), {fpga_param}, {batch3});
umangyadav's avatar
umangyadav committed
514
515
516
    auto run_fpga_ins_0 = run_fpga_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_fpga_ins);
    run_fpga_mod->add_return({run_fpga_ins_0});
517
518

    auto* run_ref_mod = p.create_module("ref_mod");
umangyadav's avatar
umangyadav committed
519
520
    auto ref_param =
        run_ref_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {4, 4}});
521
522
    auto run_ref_ins = run_ref_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 3}}), {ref_param}, {batch4});
umangyadav's avatar
umangyadav committed
523
524
525
    auto run_ref_ins_0 = run_ref_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_ref_ins);
    run_ref_mod->add_return({run_ref_ins_0});
526

umangyadav's avatar
umangyadav committed
527
528
529
    auto* mm = p.get_main_module();
    migraphx::shape dyn_s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
    auto input                              = mm->add_parameter("data", dyn_s);
530
531
532
533
534
535
536
537
538
    std::vector<migraphx::shape> sub_shapes = {};
    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
    migraphx::shape out_attr = migraphx::shape{sub_shapes};
    auto sm_ins              = mm->add_instruction(
        migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
        {input},
        {run_cpu_mod, run_gpu_mod, run_fpga_mod, run_ref_mod});
    auto ret0 = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
umangyadav's avatar
umangyadav committed
539
    mm->add_return({ret0});
540
    // compile
umangyadav's avatar
umangyadav committed
541
542
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
543
544
    p.compile({migraphx::make_target("gpu"),
               migraphx::make_target("cpu"),
umangyadav's avatar
umangyadav committed
545
546
547
               migraphx::make_target("ref"),
               migraphx::make_target("ref")},
              {gpu_opts});
548
549
550
    EXPECT(check_compiled_program(p,
                                  {migraphx::make_target("gpu"),
                                   migraphx::make_target("cpu"),
umangyadav's avatar
umangyadav committed
551
                                   migraphx::make_target("ref"),
552
                                   migraphx::make_target("ref")}));
umangyadav's avatar
umangyadav committed
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
    // program does the 12+x where x has dynamic shape {{1, 4}, {4, 4}}
    float seed = 0.0f;
    std::mt19937 gen(seed);
    std::uniform_real_distribution<> dis(0.0, 1.0);
    auto get_random_values = [&](size_t elements) {
        std::vector<float> rand_samples(elements);
        std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
        return rand_samples;
    };
    for(const size_t bs : {1, 2, 3, 4})
    {
        migraphx::shape arg_shape{migraphx::shape::float_type, {bs, 4}};
        std::vector<float> data = get_random_values(arg_shape.elements());
        migraphx::parameter_map params;
        params["data"] = migraphx::argument(arg_shape, data.data());
        auto result    = p.eval(params).back();
        std::vector<float> result_vec;
        result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
        std::vector<float> gold = data;
        std::transform(gold.begin(), gold.end(), gold.begin(), [&](auto i) { return i + 12; });
        EXPECT(migraphx::verify_range(gold, result_vec));
    }
575
576
577
}

// Entry point: dispatches command-line args to the registered TEST_CASEs.
int main(int argc, const char* argv[])
{
    test::run(argc, argv);
}