#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/gpu/device/logsoftmax.hpp>
#include <migraphx/gpu/device/tensor.hpp>
#include <migraphx/gpu/device/launch.hpp>
#include <migraphx/gpu/device/types.hpp>
#include <migraphx/gpu/hip.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

argument logsoftmax(hipStream_t stream,
                    const migraphx::shape& output_shape,
                    std::vector<migraphx::argument> args,
                    int axis)
{
    auto lens         = output_shape.lens();
    auto num_in_batch = lens[axis];
    auto batch_lens   = lens;
    batch_lens[axis]  = 1;
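    // collapse the reduction axis so batch_shape has one element per batch
    // (one slice along `axis`) of the output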
    migraphx::shape batch_shape{output_shape.type(), batch_lens};

    visit_all(args.back(), args.front())([&](auto output, auto input) {
        const auto* input_ptr = device_cast(input.data());
        auto* output_ptr      = device_cast(output.data());
        visit_tensor_size(batch_shape.lens().size(), [&](auto n_dim) {
            hip_tensor_descriptor<n_dim> desc_batch(batch_shape);
            hip_tensor_descriptor<n_dim> desc_data(output_shape);

            // use one block for items in one batch.
            // opt 1, load all data to lds then use the same approach as
            // the current optimization
            const size_t block_size = 1024;
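            // one workgroup of block_size threads is launched for each batch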
            launch(
                stream, batch_shape.elements() * block_size, block_size)([=](auto idx) __device__ {
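                // idx.local indexes threads within the workgroup;
                // idx.group indexes workgroups (one workgroup per batch)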
                size_t thr_idx = idx.local;
                size_t blk_idx = idx.group;
                // using type = typename decltype(input)::value_type;
                using type = device_type<std::remove_cv_t<typename decltype(output)::value_type>>;

                // all data can be loaded to the lds once, so all operations are
                // done in lds
                MIGRAPHX_DEVICE_SHARED type lds_data[block_size + 2];
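                // the two extra slots hold the running batch max (lds_data[block_size])
                // and the running sum of exponentials (lds_data[block_size + 1])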
                auto batch_idx = desc_batch.multi(blk_idx);
                auto data_idx  = batch_idx;
                // first pass: load data to lds and compute the batch max
                size_t item_num      = num_in_batch;
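                // seed the running batch max with the first input element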
                lds_data[block_size] = input_ptr[0];
                for(size_t i = thr_idx; i < num_in_batch; i += block_size)
                {
                    data_idx[axis]    = i;
                    // index by thr_idx (i % block_size) so a batch larger than
                    // block_size does not write past the end of lds_data
                    lds_data[thr_idx] = input_ptr[desc_data.linear(data_idx)];

                    __syncthreads();

                    // use thread 0 for batch_max
                    if(thr_idx == 0)
                    {
                        auto size = (item_num > block_size) ? block_size : item_num;
                        for(size_t j = 0; j < size; j++)
                        {
                            lds_data[block_size] =
                                ::max(to_hip_type(lds_data[block_size]), to_hip_type(lds_data[j]));
                        }
                        item_num -= block_size;
                    }
                    __syncthreads();
                }

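                // second pass: accumulate the batch sum of exponentials, sum(exp(x - max))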
                const size_t block_size1 = block_size + 1;
                lds_data[block_size1]    = 0;
                item_num                 = num_in_batch;
                for(size_t i = thr_idx; i < num_in_batch; i += block_size)
                {
                    data_idx[axis]    = i;
                    lds_data[thr_idx] = input_ptr[desc_data.linear(data_idx)];

                    __syncthreads();

                    // use thread 0 to accumulate the batch sum of exponentials
                    if(thr_idx == 0)
                    {
                        auto size = (item_num > block_size) ? block_size : item_num;
                        for(size_t j = 0; j < size; j++)
                        {
                            lds_data[block_size1] +=
                                ::exp(to_hip_type(lds_data[j] - lds_data[block_size]));
                        }
                        item_num -= block_size;
                    }
                    __syncthreads();
                }

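                // third pass: logsoftmax(x) = x - (log(sum(exp(x - max))) + max)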
                auto log_batch_sum =
                    ::log(to_hip_type(lds_data[block_size1])) + lds_data[block_size];
                item_num = num_in_batch;
                for(size_t i = thr_idx; i < num_in_batch; i += block_size)
                {
                    data_idx[axis]    = i;
                    size_t index      = desc_data.linear(data_idx);
                    output_ptr[index] = input_ptr[index] - log_batch_sum;
                }
            });
        });
    });

    return args.back();
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx