// GPU device implementation of softmax for MIGraphX.
#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/softmax.hpp>
#include <migraphx/gpu/device/reduce.hpp>
#include <migraphx/gpu/device/tensor.hpp>
#include <migraphx/gpu/device/launch.hpp>
#include <migraphx/gpu/device/types.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

15
void softmax(hipStream_t stream, const argument& result, const argument& arg, int64_t axis)
Khalique's avatar
Khalique committed
16
{
17
18
    auto batch_lens          = result.get_shape().lens();
    index_int batch_item_num = batch_lens[axis];
19
    batch_lens[axis]         = 1;
20
    migraphx::shape batch_shape{result.get_shape().type(), batch_lens};
Khalique's avatar
Khalique committed
21

Paul's avatar
Paul committed
22
    hip_visit_all(result, arg, batch_shape)([&](auto output, auto input, auto batch) {
23
        const index_int max_block_size = 128;
Khalique Ahmed's avatar
Khalique Ahmed committed
24
        const index_int block_size = compute_block_size(batch_item_num, max_block_size);
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
        using type = device_type<std::remove_cv_t<typename decltype(input)::value_type>>;
        type init  = lowest();

        if(axis == batch_lens.size() - 1)
        {
            gs_launch(stream, batch_shape.elements() * block_size, block_size)(
                [=](auto i, auto idx) __device__ {
                    auto start_loc = i / block_size * batch_item_num;
                    auto batch_max = block_reduce<max_block_size>(
                        idx, max{}, init, batch_item_num, [&](auto j) __device__ {
                            return input[start_loc + j];
                        });

                    auto batch_sum = block_reduce<max_block_size>(
                        idx, sum{}, 0, batch_item_num, [&](auto j) __device__ {
                            auto val = input[start_loc + j] - batch_max;
                            return ::exp(to_hip_type(val));
                        });
43

44
45
46
47
                    idx.local_stride(batch_item_num, [&](auto j) __device__ {
                        auto val              = input[start_loc + j] - batch_max;
                        output[start_loc + j] = ::exp(to_hip_type(val)) / batch_sum;
                    });
Shucai Xiao's avatar
Shucai Xiao committed
48
                });
49
50
51
52
53
54
55
56
57
58
59
        }
        else
        {
            gs_launch(stream, batch_shape.elements() * block_size, block_size)(
                [=](auto i, auto idx) __device__ {
                    auto data_idx  = batch.multi(i / block_size);
                    auto batch_max = block_reduce<max_block_size>(
                        idx, max{}, init, batch_item_num, [&](auto j) __device__ {
                            data_idx[axis] = j;
                            return input[data_idx];
                        });
60

61
62
63
64
65
66
67
68
69
70
71
72
73
74
                    auto batch_sum = block_reduce<max_block_size>(
                        idx, sum{}, 0, batch_item_num, [&](auto j) __device__ {
                            data_idx[axis] = j;
                            auto val       = input[data_idx] - batch_max;
                            return ::exp(to_hip_type(val));
                        });

                    idx.local_stride(batch_item_num, [&](auto j) __device__ {
                        data_idx[axis]   = j;
                        auto val         = input[data_idx] - batch_max;
                        output[data_idx] = ::exp(to_hip_type(val)) / batch_sum;
                    });
                });
        }
Khalique's avatar
Khalique committed
75
76
77
78
79
80
81
    });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx