#include <migraphx/gpu/mlir.hpp>
#include <migraphx/module.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/stringutils.hpp>
#include <test.hpp>

using migraphx::trim;

// Normalize a string for whitespace-insensitive comparison: each run of
// whitespace collapses to a two-space separator, non-printable characters
// are dropped, and the result is trimmed. Used to compare MLIR dumps
// without being sensitive to line breaks or indentation.
std::string encode(std::string s)
{
    std::stringstream ss;
    bool prespace = false;
    for(auto c : s)
    {
        // Cast to unsigned char before calling the <cctype> classification
        // functions: passing a negative plain char is undefined behavior.
        const auto uc = static_cast<unsigned char>(c);
        if(std::isspace(uc) != 0)
        {
            // Emit the separator only once per whitespace run.
            if(not prespace)
                ss << "  ";
            prespace = true;
        }
        else if(std::isprint(uc) != 0)
        {
            ss << c;
            prespace = false;
        }
    }
    // Trim any separator produced by leading/trailing whitespace.
    return migraphx::trim(ss.str());
}

TEST_CASE(conv)
{
    // Expected MLIR text for a module containing a single convolution;
    // comparison below is whitespace-normalized via encode().
    const std::string mlir_output = R"__migraphx__(
module  {
  func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32> {
    %0 = "migraphx.convolution"(%arg0, %arg1) {dilation = [1 : si64, 1 : si64], group = 1 : si64, padding = [0 : si64, 0 : si64], padding_mode = 0 : si64, stride = [1 : si64, 1 : si64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
    %1 = return %0 : tensor<1x2x2x2xf32>
  }
}
)__migraphx__";
    // Build the equivalent MIGraphX module: conv(x, w) with default attributes.
    migraphx::module m;
    auto x    = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
    auto w    = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
    auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
    m.add_return({conv});
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if(s.empty())
        return;
    EXPECT(encode(s) == encode(mlir_output));
    // Smoke-test compilation as well; the result itself is not inspected,
    // so do not bind it to an unused local.
    migraphx::gpu::compile_mlir(m);
}

TEST_CASE(conv_add_relu)
{
    // Expected MLIR text for a convolution -> add -> relu chain;
    // comparison below is whitespace-normalized via encode().
    const std::string mlir_output = R"__migraphx__(
module  {
  func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32> {
    %0 = "migraphx.convolution"(%arg0, %arg1) {dilation = [1 : si64, 1 : si64], group = 1 : si64, padding = [0 : si64, 0 : si64], padding_mode = 0 : si64, stride = [1 : si64, 1 : si64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
    %1 = "migraphx.add"(%0, %arg2) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
    %2 = "migraphx.relu"(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
    %3 = return %2 : tensor<1x2x2x2xf32>
  }
}
)__migraphx__";
    // Build the equivalent MIGraphX module: relu(conv(x, w) + b).
    migraphx::module m;
    auto x = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
    auto w = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
    auto b = m.add_parameter("b", {migraphx::shape::float_type, {1, 2, 2, 2}});
    auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
    auto add  = m.add_instruction(migraphx::make_op("add"), conv, b);
    auto relu = m.add_instruction(migraphx::make_op("relu"), add);
    m.add_return({relu});
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if(s.empty())
        return;
    EXPECT(encode(s) == encode(mlir_output));
}

// Entry point: run every registered TEST_CASE.
int main(int argc, const char* argv[])
{
    test::run(argc, argv);
}