"...lm-evaluation-harness.git" did not exist on "66736bc183a7aee5dba4179ca2ccf96c3a8ff736"
logsoftmax.cpp 4.39 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/gpu/device/logsoftmax.hpp>
#include <migraphx/gpu/device/tensor.hpp>
#include <migraphx/gpu/device/launch.hpp>
#include <migraphx/gpu/device/types.hpp>
#include <migraphx/gpu/hip.hpp>
#include <type_traits>
#include <vector>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

argument logsoftmax(hipStream_t stream,
                    const migraphx::shape& output_shape,
                    std::vector<migraphx::argument> args,
                    int axis)
{

    auto lens         = output_shape.lens();
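    // lens[axis] elements are reduced by each softmax slice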
    auto num_in_batch = lens[axis];
    auto batch_lens   = lens;
    batch_lens[axis]  = 1;
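    // collapsing the reduction axis to 1 gives one batch_shape element per
    // independent reduction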
    migraphx::shape batch_shape{output_shape.type(), batch_lens};

    visit_all(args.back(), args.front())([&](auto output, auto input) {
        const auto* input_ptr = device_cast(input.data());
        auto* output_ptr      = device_cast(output.data());
        visit_tensor_size(batch_shape.lens().size(), [&](auto n_dim) {
            hip_tensor_descriptor<n_dim> desc_batch(batch_shape);
            hip_tensor_descriptor<n_dim> desc_data(output_shape);
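
            // logsoftmax(x) = x - max(x) - log(sum(exp(x - max(x)))),
            // computed in three passes over each slice: max, exp-sum, write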
            // Use one workgroup (block) per batch slice along the reduction
            // axis; the slice is staged through LDS so the reductions read
            // from fast shared memory.
            const size_t block_size = 1024;
            launch(stream, batch_shape.elements() * block_size, block_size)([=](auto idx) __device__ {
                size_t thr_idx = idx.local;
                size_t blk_idx = idx.group;
                // device-side element type of the output tensor
                using type = device_type<std::remove_cv_t<typename decltype(output)::value_type>>;

                // Stage the slice through LDS tile by tile; two extra slots
                // hold the running reductions: lds_data[block_size] the max,
                // lds_data[block_size + 1] the exp-sum.
                MIGRAPHX_DEVICE_SHARED type lds_data[block_size + 2];
                auto batch_idx = desc_batch.multi(blk_idx);
                auto data_idx = batch_idx;
                // load data to lds and compute the batch max
                size_t item_num = num_in_batch;
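                // Seed the running max. Any seed is safe here: the shift
                // cancels in logsoftmax, and every thread stores the same
                // value, so the concurrent write is benign.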
                lds_data[block_size] = input_ptr[0];
                // Every thread runs the same number of loop iterations so the
                // __syncthreads() barriers below are reached uniformly, even
                // when num_in_batch is not a multiple of block_size.
                for(size_t start = 0; start < num_in_batch; start += block_size)
                {
                    size_t i = start + thr_idx;
                    if(i < num_in_batch)
                    {
                        data_idx[axis]    = i;
                        lds_data[thr_idx] = input_ptr[desc_data.linear(data_idx)];
                    }

                    __syncthreads();

                    // use thread 0 to fold this tile into the running max
                    if(thr_idx == 0)
                    {
                        auto size = (item_num > block_size) ? block_size : item_num;
                        for(size_t j = 0; j < size; j++)
                        {
                            lds_data[block_size] =
                                ::max(to_hip_type(lds_data[block_size]), to_hip_type(lds_data[j]));
                        }
                        item_num -= block_size;
                    }
                    __syncthreads();
                }

                // second pass: accumulate sum(exp(x - max)) in lds_data[block_size + 1]
                const size_t block_size1 = block_size + 1;
                lds_data[block_size1] = 0;
                item_num = num_in_batch;
                for(size_t start = 0; start < num_in_batch; start += block_size)
                {
                    size_t i = start + thr_idx;
                    if(i < num_in_batch)
                    {
                        data_idx[axis]    = i;
                        lds_data[thr_idx] = input_ptr[desc_data.linear(data_idx)];
                    }

                    __syncthreads();

                    // use thread 0 to fold this tile into the running exp-sum
                    if(thr_idx == 0)
                    {
                        auto size = (item_num > block_size) ? block_size : item_num;
                        for(size_t j = 0; j < size; j++)
                        {
                            lds_data[block_size1] +=
                                ::exp(to_hip_type(lds_data[j] - lds_data[block_size]));
                        }
                        item_num -= block_size;
                    }
                    __syncthreads();
                }

                // log(sum(exp(x - max))) + max == log(sum(exp(x)))
                auto log_batch_sum = ::log(to_hip_type(lds_data[block_size1])) + lds_data[block_size];
                for(size_t i = thr_idx; i < num_in_batch; i += block_size)
                {
                    data_idx[axis] = i;
                    size_t index = desc_data.linear(data_idx);
                    output_ptr[index] = input_ptr[index] - log_batch_sum;
                }
            });
        });
    });

    return args.back();
}
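
// Usage sketch (hypothetical shapes and argument names): args holds
// {input, output} and the result aliases args.back().
//   migraphx::shape s{migraphx::shape::float_type, {32, 1000}};
//   auto result = logsoftmax(stream, s, {input_arg, output_arg}, 1);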

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx