// threadwise_gemm.hip.hpp
#pragma once

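// Cast a generic pointer to LDS (GPU shared memory, address space 3) so it
// can be used as the address operand of the ds_read* instructions below.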
extern "C" __attribute__((address_space(3))) void* __to_local(void* p) [[hc]];

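// Copy an NRow x NCol tile of Float from LDS (p_src, read via ds_read) into
// per-thread storage (p_dst), using the matrix descriptors' Get1dIndex to
// resolve strides.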
template <class Float, class SrcMatrix, class DstMatrix, unsigned NRow, unsigned NCol>
__device__ void threadwise_matrix_copy(SrcMatrix,
                                       const Float* __restrict__ p_src,
                                       DstMatrix,
                                       Float* __restrict__ p_dst,
                                       Sequence<NRow, NCol>)
{
    constexpr auto src_mtx = SrcMatrix{};
    constexpr auto dst_mtx = DstMatrix{};

    for(unsigned i = 0; i < NRow; ++i)
    {
#if 1
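        // Fast path, hand-tuned for NCol == 8: read each row as two float4
        // values with ds_read2_b64.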
        assert(NCol == 8);
        {
            const unsigned src_index = src_mtx.Get1dIndex(i, 0);
            const unsigned dst_index = dst_mtx.Get1dIndex(i, 0);

            const float4* loc = (const float4*)(p_src + src_index);
            float4* reg       = (float4*)(p_dst + dst_index);

            //reg[0] = loc[0];
            //reg[1] = loc[1];

            asm volatile("\n \
                    ds_read2_b64 %0, %2 offset1:1 \n \
                    ds_read2_b64 %1, %2 offset0:16 offset1:17 \n \
                    s_waitcnt lgkmcnt(0)" : "=v"(reg[0]), "=v"(reg[1]) : "v"(__to_local((void *)&p_src[src_index])));
        }

#else
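        // Generic fallback: one ds_read_b32 per element.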
        for(unsigned j = 0; j < NCol; ++j)
        {
            const unsigned src_index = src_mtx.Get1dIndex(i, j);
            const unsigned dst_index = dst_mtx.Get1dIndex(i, j);
            // p_dst[dst_index] = p_src[src_index];
            asm volatile("ds_read_b32 %0, %1 \ns_waitcnt lgkmcnt(0)"
                         : "=v"(p_dst[dst_index])
                         : "v"(__to_local((void*)&p_src[src_index])));
        }
#endif
    }
}
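
// ---------------------------------------------------------------------------
// Illustrative only: a minimal sketch of the matrix-descriptor interface the
// functions in this header assume (NRow()/NCol()/Get1dIndex()). The name
// ExampleRowMajorMatrix and its layout are hypothetical; the real descriptors
// are defined elsewhere in this codebase.
// ---------------------------------------------------------------------------
template <unsigned NRow_, unsigned NCol_, unsigned RowStride_>
struct ExampleRowMajorMatrix
{
    __device__ constexpr unsigned NRow() const { return NRow_; }
    __device__ constexpr unsigned NCol() const { return NCol_; }

    // Flatten (row, col) into a 1d offset, row-major with a fixed row stride.
    __device__ constexpr unsigned Get1dIndex(unsigned row, unsigned col) const
    {
        return row * RowStride_ + col;
    }
};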

// Per-thread register-tile GEMM: accumulate transpose(A) * B into C.
// Only the TransA && !TransB && !TransC case is implemented, and the
// inline-assembly path additionally assumes M == N == 8 and K == 1.
template <class MatrixA,
          class MatrixB,
          class MatrixC,
          bool TransA,
          bool TransB,
          bool TransC,
          class FloatA,
          class FloatB,
          class FloatC,
          class Accumulator>
__device__ void threadwise_gemm(MatrixA,
                                integral_constant<bool, TransA>,
                                const FloatA* __restrict__ p_a_thread,
                                MatrixB,
                                integral_constant<bool, TransB>,
                                const FloatB* __restrict__ p_b_thread,
                                MatrixC,
                                integral_constant<bool, TransC>,
                                FloatC* __restrict__ p_c_thread,
                                Accumulator f_accum)
{
    if(TransA && (!TransB) && (!TransC))
    {
        constexpr auto a_mtx = MatrixA{};
        constexpr auto b_mtx = MatrixB{};
        constexpr auto c_mtx = MatrixC{};

        constexpr unsigned M = c_mtx.NRow();
        constexpr unsigned N = c_mtx.NCol();
        constexpr unsigned K = a_mtx.NRow(); // A is transposed

        assert(M == 8);
        assert(N == 8);
        assert(K == 1);

        for(unsigned k = 0; k < K; ++k)
        {
            const unsigned bindex = b_mtx.Get1dIndex(k, 0);
            for(unsigned i = 0; i < M; ++i)
            {
                const unsigned aindex = a_mtx.Get1dIndex(k, i); // A is transposed
                const unsigned cindex = c_mtx.Get1dIndex(i, 0);

                // N == 8: the j-loop is kept for reference; it is fully
                // unrolled into the inline assembly below.
                // for(unsigned j = 0; j < N; ++j)
                {
                    // Reference scalar body; the inline assembly below
                    // hard-codes f_accum as multiply-accumulate (v_mac_f32).
                    // const unsigned bindex = b_mtx.Get1dIndex(k, j);
                    // const unsigned cindex = c_mtx.Get1dIndex(i, j);
                    // f_accum(p_c_thread[cindex], p_a_thread[aindex] * p_b_thread[bindex]);

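                    // v_mac_f32 d, a, b computes d += a * b. The eight
                    // instructions are the unrolled j-loop over one row of C;
                    // the "0".."7" input constraints tie each output C element
                    // to its previous value (read-modify-write).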
                    asm volatile("\n \
                            v_mac_f32 %0, %8, %9 \n \
                            v_mac_f32 %1, %8, %10 \n \
                            v_mac_f32 %2, %8, %11 \n \
                            v_mac_f32 %3, %8, %12 \n \
                            v_mac_f32 %4, %8, %13 \n \
                            v_mac_f32 %5, %8, %14 \n \
                            v_mac_f32 %6, %8, %15 \n \
                            v_mac_f32 %7, %8, %16 \n \
                            "
                            : "=v"(p_c_thread[cindex + 0]),"=v"(p_c_thread[cindex + 1]),"=v"(p_c_thread[cindex + 2]),"=v"(p_c_thread[cindex + 3]),"=v"(p_c_thread[cindex + 4]),"=v"(p_c_thread[cindex + 5]),"=v"(p_c_thread[cindex + 6]),"=v"(p_c_thread[cindex + 7])
                            : "v"(p_a_thread[aindex]), "v"(p_b_thread[bindex + 0]), "v"(p_b_thread[bindex + 1]),"v"(p_b_thread[bindex + 2]),"v"(p_b_thread[bindex + 3]),"v"(p_b_thread[bindex + 4]),"v"(p_b_thread[bindex + 5]),"v"(p_b_thread[bindex + 6]),"v"(p_b_thread[bindex + 7]),
                            "0"(p_c_thread[cindex + 0]),"1"(p_c_thread[cindex + 1]),"2"(p_c_thread[cindex + 2]),"3"(p_c_thread[cindex + 3]),"4"(p_c_thread[cindex + 4]),"5"(p_c_thread[cindex + 5]),"6"(p_c_thread[cindex + 6]),"7"(p_c_thread[cindex + 7])
                            );
                }
            }
        }
    }
    else
    {
        // not implemented
        assert(false);
    }
}
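
// ---------------------------------------------------------------------------
// Illustrative only: how a caller might drive threadwise_gemm for the 8x8x1
// tile the asserts above require, using the hypothetical ExampleRowMajorMatrix
// descriptor sketched earlier. ExampleAccumulate and example_thread_tile_gemm
// are not part of the original header.
// ---------------------------------------------------------------------------
struct ExampleAccumulate
{
    __device__ void operator()(float& c, float ab) const { c += ab; }
};

inline __device__ void example_thread_tile_gemm(const float* __restrict__ p_a,
                                                const float* __restrict__ p_b,
                                                float* __restrict__ p_c)
{
    // A is stored transposed (K x M), B is K x N, C is M x N, with
    // M == N == 8 and K == 1 as asserted in threadwise_gemm.
    using AMtx = ExampleRowMajorMatrix<1, 8, 8>;
    using BMtx = ExampleRowMajorMatrix<1, 8, 8>;
    using CMtx = ExampleRowMajorMatrix<8, 8, 8>;

    threadwise_gemm(AMtx{},
                    integral_constant<bool, true>{},
                    p_a,
                    BMtx{},
                    integral_constant<bool, false>{},
                    p_b,
                    CMtx{},
                    integral_constant<bool, false>{},
                    p_c,
                    ExampleAccumulate{});
}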