pooling.cpp

#include <migraphx/config.hpp>
#include <migraphx/register_op.hpp>
#include <migraphx/reflect.hpp>
#include <migraphx/par_for.hpp>
#include <migraphx/context.hpp>
#include <migraphx/cpu/context.hpp>
#include <migraphx/cpu/dnnl.hpp>
#include <migraphx/op/pooling.hpp>

#include <unordered_map>
#include <vector>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace cpu {

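// Pooling operator for the CPU target, lowered to a oneDNN (DNNL)
// pooling_forward primitive through the dnnl_extend_op adapter.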
struct dnnl_pooling : dnnl_extend_op<dnnl_pooling, dnnl::pooling_forward, op::pooling>
{
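    // Pooling reads a single input tensor: the source argument.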
    std::vector<int> arg_map(int) const { return {MIGRAPHX_DNNL_PREFIX(ARG_SRC)}; }

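    // Translate the MIGraphX pooling attributes into a oneDNN forward
    // pooling descriptor.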
    dnnl::pooling_forward::desc get_desc(const std::unordered_map<int, dnnl::memory::desc>& m) const
    {
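        // Map the pooling mode to a oneDNN algorithm; anything other than
        // max pooling is treated as average pooling.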
        auto algo = op.mode == op::pooling_mode::max ? dnnl::algorithm::pooling_max
                                                     : dnnl::algorithm::pooling_avg;
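        // op.padding stores the leading pads followed by the trailing pads,
        // one entry per spatial (kernel) dimension; split them for oneDNN.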
        auto kdims = op.kdims();
        std::vector<size_t> padding_l(op.padding.begin(), op.padding.begin() + kdims);
        std::vector<size_t> padding_r(op.padding.begin() + kdims, op.padding.end());
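        // Build the descriptor for forward inference: algorithm, source and
        // destination memory descriptors, strides, kernel sizes, and padding.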
        return {dnnl::prop_kind::forward_inference,
                algo,
                m.at(MIGRAPHX_DNNL_PREFIX(ARG_SRC)),
                m.at(MIGRAPHX_DNNL_PREFIX(ARG_DST)),
                to_dnnl_dims(op.stride),
                to_dnnl_dims(op.lengths),
                to_dnnl_dims(padding_l),
                to_dnnl_dims(padding_r)};
    }
};

} // namespace cpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx