/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <iostream>
#include <set>
#include <unordered_set>
#include <vector>
#include <random>
#include <cmath>
#include <migraphx/program.hpp>
#include <migraphx/target.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/module.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/shape.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/functional.hpp>
#include <basic_ops.hpp>
#include <migraphx/compile_options.hpp>
#include <migraphx/register_target.hpp>
#include "test.hpp"

// check whether the operator has a "target" attribute (e.g. a custom_op or a run_on_module operator)
bool has_target_attr(const migraphx::instruction& ins)
{
    return ins.get_operator().attributes().contains("target");
}

auto nonprefixed_ops()
{
    // ops that keep their unprefixed names even after target compilation
    static std::unordered_set<std::string> op_map = {
        "select_module", "load", "if", "nonmaxsuppression", "multibroadcast"};
    return op_map;
}

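// After compilation for the gpu target, every instruction in the submodule should either be a
// builtin (prefixed with "@"), carry a "gpu::" or "hip::" prefix, be a check_context op, be one of
// the target-agnostic ops from nonprefixed_ops(), or carry a "target" attribute.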
bool is_compiled_gpu_module(const migraphx::module& m)
{
    return std::all_of(m.begin(), m.end(), [](auto ins) {
        auto ins_name = ins.name();
        if(not migraphx::starts_with(ins_name, "@"))
        {
            if(not migraphx::starts_with(ins_name, "gpu::") and
               not migraphx::starts_with(ins_name, "hip::") and
               not migraphx::starts_with(ins_name, "check_context") and
               not migraphx::contains(nonprefixed_ops(), ins_name) and not has_target_attr(ins))
            {
                return false;
            }
        }
        return true;
    });
}

bool is_compiled_fpga_module(const migraphx::module& m)
{
    return std::all_of(m.begin(), m.end(), [](auto ins) {
        auto ins_name = ins.name();
        if(not migraphx::starts_with(ins_name, "@"))
        {
            if(not migraphx::starts_with(ins_name, "fpga::") and
               not migraphx::starts_with(ins_name, "check_context") and
               not migraphx::contains(nonprefixed_ops(), ins_name) and not has_target_attr(ins))
            {
                return false;
            }
        }
        return true;
    });
}

bool is_compiled_cpu_module(const migraphx::module& m)
{
    return std::all_of(m.begin(), m.end(), [](auto ins) {
        auto ins_name = ins.name();
        if(not migraphx::starts_with(ins_name, "@"))
        {
            if(not migraphx::starts_with(ins_name, "cpu::") and
               not migraphx::starts_with(ins_name, "dnnl::") and
               not migraphx::starts_with(ins_name, "check_context") and not has_target_attr(ins) and
               not migraphx::contains(nonprefixed_ops(), ins_name))
            {
                return false;
            }
        }
        return true;
    });
}

bool is_compiled_ref_module(const migraphx::module& m)
{
    return std::all_of(m.begin(), m.end(), [](auto ins) {
        auto ins_name = ins.name();
        if(not migraphx::starts_with(ins_name, "@"))
        {
            if((not migraphx::starts_with(ins_name, "ref::") and
                not migraphx::starts_with(ins_name, "check_context") and
                not has_target_attr(ins)) and
               not migraphx::contains(nonprefixed_ops(), ins_name))
            {
                return false;
            }
        }
        return true;
    });
}

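// Walk every module of the compiled program; for each run_on_target instruction, look up the
// target by its target_id (an index into the target list passed to compile()) and verify that the
// wrapped submodule only contains instructions lowered for that target.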
// NOLINT
bool check_compiled_program(const migraphx::program& p,
                            const std::vector<migraphx::target>& targets)
{
    auto mods           = p.get_modules();
    bool check_compiled = true;
    for(const auto* mod : mods)
    {
        for(const auto& ins : *mod)
        {
            if(ins.name() == "run_on_target")
            {
                auto* mod_input = ins.module_inputs().front();
                std::size_t target_id =
                    ins.get_operator().to_value()["target_id"].to<std::size_t>();
                auto target_name = targets.at(target_id).name();
                if(target_name == "gpu")
                    check_compiled &= is_compiled_gpu_module(*mod_input);
                else if(target_name == "cpu")
                    check_compiled &= is_compiled_cpu_module(*mod_input);
                else if(target_name == "fpga")
                    check_compiled &= is_compiled_fpga_module(*mod_input);
                else if(target_name == "ref")
                    check_compiled &= is_compiled_ref_module(*mod_input);
            }
        }
    }
    return check_compiled;
}

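// Compile a single program for both cpu and gpu: (x + y) runs in the cpu submodule and its result
// is fed, together with z, into an add in the gpu submodule.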
TEST_CASE(multitarget_compile_cpu_gpu)
{
    migraphx::program p;
    auto* mm      = p.get_main_module();
    auto* cpu_mod = p.create_module("cpu_mod");
    auto s        = migraphx::shape{migraphx::shape::float_type, {8}};
    auto x_cpu    = cpu_mod->add_parameter("cpu_x", s);
    auto y_cpu    = cpu_mod->add_parameter("cpu_y", s);
    auto cpu_add  = cpu_mod->add_instruction(migraphx::make_op("add"), x_cpu, y_cpu);
    cpu_mod->add_return({cpu_add});

    auto* gpu_mod = p.create_module("gpu_mod");
    auto x_gpu    = gpu_mod->add_parameter("gpu_x", s);
    auto y_gpu    = gpu_mod->add_parameter("gpu_y", s);
    auto gpu_add  = gpu_mod->add_instruction(migraphx::make_op("add"), x_gpu, y_gpu);
    gpu_mod->add_return({gpu_add});

    auto x_param = mm->add_parameter("x", s);
    auto y_param = mm->add_parameter("y", s);
    auto z_param = mm->add_parameter("z", s);
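    // target_id is an index into the target list passed to compile(): here 0 = gpu, 1 = cpu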
    auto cpu_ins = mm->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 1}}), {x_param, y_param}, {cpu_mod});
    auto cpu_ins_0 =
        mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), cpu_ins);
    auto gpu_ins = mm->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 0}}), {cpu_ins_0, z_param}, {gpu_mod});
    auto gpu_ins_0 =
        mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), gpu_ins);

    mm->add_return({gpu_ins_0});
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    p.compile({migraphx::make_target("gpu"), migraphx::make_target("cpu")}, {gpu_opts});
    EXPECT(check_compiled_program(p, {migraphx::make_target("gpu"), migraphx::make_target("cpu")}));
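    // evaluate: the cpu module computes x + y, the gpu module adds z, so the result is x + y + z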
    migraphx::parameter_map params;
    std::vector<float> x_data(s.elements(), 1);
    std::vector<float> y_data(s.elements(), 2);
    std::vector<float> z_data(s.elements(), 3);
    params["x"] = migraphx::argument(s, x_data.data());
    params["y"] = migraphx::argument(s, y_data.data());
    params["z"] = migraphx::argument(s, z_data.data());
    auto result = p.eval(params).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold(s.elements(), 6);
    EXPECT(migraphx::verify_range(gold, result_vector));
}

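// Use the multi-target compile() overload with only one target in actual use: just the gpu
// submodule (running nonmaxsuppression with a dynamic output) is dispatched via run_on_target.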
TEST_CASE(single_target_multi_compile)
{
    migraphx::program p;
    migraphx::shape boxes_s{migraphx::shape::float_type, {1, 6, 4}};
    auto* mm         = p.get_main_module();
    auto boxes_param = mm->add_parameter("boxes", boxes_s);

    auto* gpu_mod        = p.create_module("gpu_mod");
    auto boxes_param_gpu = gpu_mod->add_parameter("boxes_param_gpu", boxes_s);
    migraphx::shape scores_s{migraphx::shape::float_type, {1, 1, 6}};
    std::vector<float> scores_vec = {0.9, 0.75, 0.6, 0.95, 0.5, 0.3};
    auto scores_l                 = gpu_mod->add_literal(migraphx::literal(scores_s, scores_vec));
    auto max_out_l                = gpu_mod->add_literal(int64_t{4});
    auto iou_threshold            = gpu_mod->add_literal(0.5f);
    auto score_threshold          = gpu_mod->add_literal(0.0f);
    auto r                        = gpu_mod->add_instruction(
        migraphx::make_op("nonmaxsuppression",
                          {{"center_point_box", true}, {"use_dyn_output", true}}),
        boxes_param_gpu,
        scores_l,
        max_out_l,
        iou_threshold,
        score_threshold);
    gpu_mod->add_return({r});

    auto run_on_gpu = mm->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 0}}), {boxes_param}, {gpu_mod});
    auto run_on_gpu_0 =
        mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_on_gpu);
    mm->add_return({run_on_gpu_0});

    // compile using multi-target compilation path
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    // add a second target ("ref") to avoid an ambiguous call to compile()
    p.compile({migraphx::make_target("gpu"), migraphx::make_target("ref")}, {gpu_opts});
    EXPECT(check_compiled_program(p, {migraphx::make_target("gpu"), migraphx::make_target("ref")}));
    // eval
    migraphx::parameter_map params;
    std::vector<float> boxes_vec = {0.5, 0.5,  1.0, 1.0, 0.5, 0.6,  1.0, 1.0, 0.5, 0.4,   1.0, 1.0,
                                    0.5, 10.5, 1.0, 1.0, 0.5, 10.6, 1.0, 1.0, 0.5, 100.5, 1.0, 1.0};
    params["boxes"]              = migraphx::argument(boxes_s, boxes_vec.data());
    auto output                  = p.eval(params).back();
    std::vector<int64_t> result;
    output.visit([&](auto out) { result.assign(out.begin(), out.end()); });
    std::vector<int64_t> gold = {0, 0, 3, 0, 0, 0, 0, 0, 5};
    EXPECT(migraphx::verify_range(result, gold));
}

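// Dispatch the two branches of an "if" to different targets: the then-branch adds 1 on the gpu,
// the else-branch multiplies by 2 on the cpu.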
TEST_CASE(multitarget_compile_if_then_else)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape cond_s{migraphx::shape::bool_type};
    auto cond = mm->add_parameter("cond", cond_s);
    migraphx::shape ds{migraphx::shape::float_type, {2, 3}};
    auto x = mm->add_parameter("x", ds);
    auto y = mm->add_parameter("y", ds);

    auto* then_mod = p.create_module("if_gpu_mod");
    std::vector<float> data1(ds.elements(), 1);
    auto l1    = then_mod->add_literal(migraphx::literal(ds, data1));
    auto gpu_x = then_mod->add_parameter("gpu_x", ds);
    auto a1    = then_mod->add_instruction(migraphx::make_op("add"), gpu_x, l1);
    then_mod->add_return({a1});

    auto* else_mod = p.create_module("else_cpu_mod");
    std::vector<float> data2(ds.elements(), 2);
    auto l2    = else_mod->add_literal(migraphx::literal(ds, data2));
    auto cpu_y = else_mod->add_parameter("cpu_y", ds);
    auto a2    = else_mod->add_instruction(migraphx::make_op("mul"), cpu_y, l2);
    else_mod->add_return({a2});

    auto* run_on_cpu_mod = p.create_module("run_on_cpu");
    auto run_cpu_ins     = run_on_cpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 1}}), {y}, {else_mod});
    auto run_cpu_ins_0 = run_on_cpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_cpu_ins);
    run_on_cpu_mod->add_return({run_cpu_ins_0});

    auto* run_on_gpu_mod = p.create_module("run_on_gpu");
    auto run_gpu_ins     = run_on_gpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 0}}), {x}, {then_mod});
    auto run_gpu_ins_0 = run_on_gpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_gpu_ins);
    run_on_gpu_mod->add_return({run_gpu_ins_0});

    auto ret =
        mm->add_instruction(migraphx::make_op("if"), {cond}, {run_on_gpu_mod, run_on_cpu_mod});
    auto r = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), ret);
    mm->add_return({r});
    // compile
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    p.compile({migraphx::make_target("gpu"), migraphx::make_target("cpu")}, {gpu_opts});
    EXPECT(check_compiled_program(p, {migraphx::make_target("gpu"), migraphx::make_target("cpu")}));
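    // evaluate both branches: cond == true -> gpu branch (x + 1), cond == false -> cpu branch (y * 2)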
    migraphx::parameter_map params;
    std::vector<float> x_data(ds.elements(), 2);
    std::vector<float> y_data(ds.elements(), 3);
    params["x"] = migraphx::argument(ds, x_data.data());
    params["y"] = migraphx::argument(ds, y_data.data());
    for(bool cond_val : {true, false})
    {
        params["cond"] = migraphx::argument(cond_s, &cond_val);
        auto result    = p.eval(params).back();
        std::vector<float> result_vector;
        result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold(ds.elements(), (cond_val ? 3 : 6));
        EXPECT(migraphx::verify_range(gold, result_vector));
    }
}

// TODO: FPGA compilation is broken right now; the test below mentions fpga but does not compile for it
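// Nest run_on_target and "if" instructions across four targets (gpu, cpu, and two ref stand-ins)
// and check that every nested submodule is lowered for its assigned target.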
TEST_CASE(multitarget_compile_nested_if_then_else)
{
    std::unordered_map<std::size_t, std::size_t> counter_map = {{0, 0}, {1, 0}};
    migraphx::shape ds{migraphx::shape::float_type, {2, 3}};
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape cond_s{migraphx::shape::bool_type};
    auto cond_0             = mm->add_parameter("cond_0", cond_s);
    auto cond_1             = mm->add_parameter("cond_1", cond_s);
    auto x                  = mm->add_parameter("x", ds);
    auto y                  = mm->add_parameter("y", ds);
    auto z                  = mm->add_parameter("z", ds);
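    // helper: builds a module computing ((in0 - 1) * in1) - in2 and wraps it in another module
    // that dispatches it via run_on_target to the target with index tid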
    auto create_test_module = [&](migraphx::program& prog,
                                  const std::vector<migraphx::instruction_ref>& inputs,
                                  std::size_t tid) {
        std::string mod_name =
            "target_" + std::to_string(tid) + "_" + std::to_string(counter_map[tid]++);
        auto* test_mod = prog.create_module(mod_name);
        std::vector<float> data(ds.elements(), -1);
        auto l1               = test_mod->add_literal(migraphx::literal(ds, data));
        auto test_mod_param_0 = test_mod->add_parameter(mod_name + "_param_0", ds);
        auto test_mod_param_1 = test_mod->add_parameter(mod_name + "_param_1", ds);
        auto test_mod_param_2 = test_mod->add_parameter(mod_name + "_param_2", ds);
        auto ins1 = test_mod->add_instruction(migraphx::make_op("add"), test_mod_param_0, l1);
        auto ins2 = test_mod->add_instruction(migraphx::make_op("mul"), ins1, test_mod_param_1);
        auto ins3 = test_mod->add_instruction(migraphx::make_op("sub"), ins2, test_mod_param_2);
        test_mod->add_return({ins3});
        auto* run_on_target_mod = prog.create_module("run_on_" + mod_name);
        auto run_ins            = run_on_target_mod->add_instruction(
            migraphx::make_op("run_on_target", {{"target_id", tid}}), inputs, {test_mod});
        auto run_ins_0 = run_on_target_mod->add_instruction(
            migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_ins);
        run_on_target_mod->add_return({run_ins_0});
        return run_on_target_mod;
    };

    // create a nested module with multiple targets:
    // then_mod has one instruction that runs a module on "ref" and another instruction that
    // creates nested modules using "if", which run on "cpu" and "gpu"
    auto* ref_mod = p.create_module("ref_mod");
    auto ref_x    = ref_mod->add_parameter("ref_x", ds);
    auto ref_y    = ref_mod->add_parameter("ref_y", ds);
    auto ref_add  = ref_mod->add_instruction(migraphx::make_op("add"), ref_x, ref_y);
    ref_mod->add_return({ref_add});

    auto* then_mod        = p.create_module("then_mod");
    auto then_mod_cond    = then_mod->add_parameter("then_mod_cond", cond_s);
    auto then_mod_param_0 = then_mod->add_parameter("then_mod_param_0", ds);
    auto then_mod_param_1 = then_mod->add_parameter("then_mod_param_1", ds);
    auto then_mod_param_2 = then_mod->add_parameter("then_mod_param_2", ds);
    auto then_mod_ref_ins =
        then_mod->add_instruction(migraphx::make_op("run_on_target", {{"target_id", 3}}),
                                  {then_mod_param_0, then_mod_param_1},
                                  {ref_mod});
    auto then_mod_ref_ins_0 = then_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), then_mod_ref_ins);
    auto then_mod_if = then_mod->add_instruction(
        migraphx::make_op("if"),
        {then_mod_cond,
         then_mod_param_0,
         then_mod_param_1,
         then_mod_param_2,
         then_mod_ref_ins_0,
         then_mod_param_1,
         then_mod_param_2},
        {create_test_module(p, {then_mod_param_0, then_mod_param_1, then_mod_param_2}, 1),
         create_test_module(p, {then_mod_ref_ins_0, then_mod_param_1, then_mod_param_2}, 0)});
    auto then_mod_if_0 =
        then_mod->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), then_mod_if);
    then_mod->add_return({then_mod_if_0});

    // create a nested else_mod with multiple targets:
    // else_mod has one instruction that runs a module on "fpga" and another instruction that
    // creates nested modules using "if", which run on "cpu" and "gpu"
    auto* fpga_mod = p.create_module("fpga_mod");
    auto fpga_x    = fpga_mod->add_parameter("fpga_x", ds);
    auto fpga_y    = fpga_mod->add_parameter("fpga_y", ds);
    auto fpga_add  = fpga_mod->add_instruction(migraphx::make_op("add"), fpga_x, fpga_y);
    fpga_mod->add_return({fpga_add});

    auto* else_mod        = p.create_module("else_mod");
    auto else_mod_cond    = else_mod->add_parameter("else_mod_cond", cond_s);
    auto else_mod_param_0 = else_mod->add_parameter("else_mod_param_0", ds);
    auto else_mod_param_1 = else_mod->add_parameter("else_mod_param_1", ds);
    auto else_mod_param_2 = else_mod->add_parameter("else_mod_param_2", ds);
    auto else_mod_fpga_ins =
        else_mod->add_instruction(migraphx::make_op("run_on_target", {{"target_id", 2}}),
                                  {else_mod_param_0, else_mod_param_2},
                                  {fpga_mod});
    auto else_mod_fpga_ins_0 = else_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), else_mod_fpga_ins);

    auto else_mod_if = else_mod->add_instruction(
        migraphx::make_op("if"),
        {else_mod_cond,
         else_mod_fpga_ins_0,
         else_mod_param_0,
         else_mod_param_1,
         else_mod_param_2,
         else_mod_param_1,
         else_mod_param_0},
        {create_test_module(p, {else_mod_fpga_ins_0, else_mod_param_0, else_mod_param_1}, 0),
         create_test_module(p, {else_mod_param_2, else_mod_param_1, else_mod_param_0}, 1)});
    auto else_mod_if_0 =
        else_mod->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), else_mod_if);
    else_mod->add_return({else_mod_if_0});

    // create a nested, multi-target main module using "if"
    auto main_if_ins = mm->add_instruction(
        migraphx::make_op("if"), {cond_0, cond_1, x, y, z, cond_1, x, y, z}, {then_mod, else_mod});
    auto r = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), main_if_ins);
    mm->add_return({r});

    // compile
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;

    p.compile({migraphx::make_target("gpu"),
               migraphx::make_target("cpu"),
               migraphx::make_target("ref"),
               migraphx::make_target("ref")},
              {gpu_opts});
    EXPECT(check_compiled_program(p,
                                  {migraphx::make_target("gpu"),
                                   migraphx::make_target("cpu"),
                                   migraphx::make_target("ref"),
                                   migraphx::make_target("ref")}));
    // do evaluation using different combinations of conditions
    // TODO: use two conditionals to cover all the paths
    migraphx::parameter_map params;
    int x_i = 2, y_i = 3, z_i = 4;
    std::vector<float> x_data(ds.elements(), x_i);
    std::vector<float> y_data(ds.elements(), y_i);
    std::vector<float> z_data(ds.elements(), z_i);
    params["x"] = migraphx::argument(ds, x_data.data());
    params["y"] = migraphx::argument(ds, y_data.data());
    params["z"] = migraphx::argument(ds, z_data.data());
    // cover all paths with different combinations of conditions
    std::vector<std::pair<bool, bool>> test_conds = {
        {true, true}, {true, false}, {false, true}, {false, false}};
    for(auto [cond_val_0, cond_val_1] : test_conds)
    {
        params["cond_0"] = migraphx::argument(cond_s, &cond_val_0);
        params["cond_1"] = migraphx::argument(cond_s, &cond_val_1);
        auto result      = p.eval(params).back();
        // main has one instruction that is : if_then_else
        // then mod is doing : {tmp = x+y; (cond) ? (((x-1)*y)-z)  : (((tmp-1)*y)-z);}
        // else mod is doing : {tmp = x+z; (cond) ? (((tmp-1)*x)-y) : (((z-1)*y)-x);}
        int gold_i = -1;
        if(cond_val_0)
        {
            int tmp_i = x_i + y_i;
            gold_i    = (cond_val_1) ? (((x_i - 1) * y_i) - z_i) : (((tmp_i - 1) * y_i) - z_i);
        }
        else
        {
            int tmp_i = x_i + z_i;
            gold_i    = (cond_val_1) ? (((tmp_i - 1) * x_i) - y_i) : (((z_i - 1) * y_i) - x_i);
        }
        std::vector<float> result_vector;
        result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold(ds.elements(), gold_i);
        EXPECT(migraphx::verify_range(gold, result_vector));
    }
}

// TODO: FPGA compilation is broken right now; the test below mentions fpga but does not compile for it
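// Combine select_module (dynamic batch dispatch) with run_on_target: each batch-specific
// submodule is wrapped in a module that sends it to a different target.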
TEST_CASE(multitarget_select_module)
{
    migraphx::program p;
    // create batch submodules
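    // each submodule computes data + 6 + 6 (i.e. data + 12) for one fixed batch size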
    auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
        auto* submod = p.create_module(module_name);
        migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
        auto sm_input = submod->add_parameter("data", sm_shape);
        migraphx::shape lit_s{migraphx::shape::float_type, {1}};
        auto literal_ins = submod->add_literal(migraphx::literal{lit_s, {6}});
        auto broadcast_lit =
            submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
        auto add_ins0 = submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
        auto add_ins1 = submod->add_instruction(migraphx::make_op("add"), add_ins0, broadcast_lit);
        submod->add_return({add_ins1});
        return submod;
    };
    auto* batch1 = create_submodule(1, "batch_1");
    auto* batch2 = create_submodule(2, "batch_2");
    auto* batch3 = create_submodule(3, "batch_3");
    auto* batch4 = create_submodule(4, "batch_4");

    auto* run_cpu_mod = p.create_module("cpu_mod");
    auto cpu_param =
        run_cpu_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {1, 4}});
    auto run_cpu_ins = run_cpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 1}}), {cpu_param}, {batch1});
    auto run_cpu_ins_0 = run_cpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_cpu_ins);
    run_cpu_mod->add_return({run_cpu_ins_0});

    auto* run_gpu_mod = p.create_module("gpu_mod");
    auto gpu_param =
        run_gpu_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 4}});
    auto run_gpu_ins = run_gpu_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 0}}), {gpu_param}, {batch2});
    auto run_gpu_ins_0 = run_gpu_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_gpu_ins);
    run_gpu_mod->add_return({run_gpu_ins_0});

    auto* run_fpga_mod = p.create_module("fpga_mod");
    auto fpga_param =
        run_fpga_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 4}});
    auto run_fpga_ins = run_fpga_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 2}}), {fpga_param}, {batch3});
    auto run_fpga_ins_0 = run_fpga_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_fpga_ins);
    run_fpga_mod->add_return({run_fpga_ins_0});

    auto* run_ref_mod = p.create_module("ref_mod");
    auto ref_param =
        run_ref_mod->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {4, 4}});
    auto run_ref_ins = run_ref_mod->add_instruction(
        migraphx::make_op("run_on_target", {{"target_id", 3}}), {ref_param}, {batch4});
    auto run_ref_ins_0 = run_ref_mod->add_instruction(
        migraphx::make_op("get_tuple_elem", {{"index", 0}}), run_ref_ins);
    run_ref_mod->add_return({run_ref_ins_0});

    auto* mm = p.get_main_module();
    migraphx::shape dyn_s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
    auto input                              = mm->add_parameter("data", dyn_s);
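    // select_module picks, at run time, the submodule whose parameter shapes match the incoming
    // batch size; output_dyn_shapes describes the dynamic shape of the tuple output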
    std::vector<migraphx::shape> sub_shapes = {};
    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
    migraphx::shape out_attr = migraphx::shape{sub_shapes};
    auto sm_ins              = mm->add_instruction(
        migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
        {input},
        {run_cpu_mod, run_gpu_mod, run_fpga_mod, run_ref_mod});
    auto ret0 = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
    mm->add_return({ret0});
    // compile
    migraphx::compile_options gpu_opts;
    gpu_opts.offload_copy = true;
    p.compile({migraphx::make_target("gpu"),
               migraphx::make_target("cpu"),
               migraphx::make_target("ref"),
               migraphx::make_target("ref")},
              {gpu_opts});
    EXPECT(check_compiled_program(p,
                                  {migraphx::make_target("gpu"),
                                   migraphx::make_target("cpu"),
                                   migraphx::make_target("ref"),
                                   migraphx::make_target("ref")}));
    // the program computes x + 12, where x has a dynamic shape of {{1, 4}, {4, 4}}
    std::mt19937 gen(0); // fixed seed so the test is reproducible
    std::uniform_real_distribution<> dis(0.0, 1.0);
    auto get_random_values = [&](size_t elements) {
        std::vector<float> rand_samples(elements);
        std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
        return rand_samples;
    };
    for(const size_t bs : {1, 2, 3, 4})
    {
        migraphx::shape arg_shape{migraphx::shape::float_type, {bs, 4}};
        std::vector<float> data = get_random_values(arg_shape.elements());
        migraphx::parameter_map params;
        params["data"] = migraphx::argument(arg_shape, data.data());
        auto result    = p.eval(params).back();
        std::vector<float> result_vec;
        result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
        std::vector<float> gold = data;
        std::transform(gold.begin(), gold.end(), gold.begin(), [&](auto i) { return i + 12; });
        EXPECT(migraphx::verify_range(gold, result_vec));
    }
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }