/* Copyright 2019 The Microsoft DeepSpeed Team */
#include <torch/extension.h>

// CUDA forward declaration
void fused_lamb_cuda(at::Tensor& p,
                     at::Tensor& p_copy,
                     at::Tensor& m,
                     at::Tensor& v,
                     at::Tensor& g,
                     float lr,
                     float beta1,
                     float beta2,
                     float max_coeff,
                     float min_coeff,
                     float eps,
                     float grad_scale,
                     int step,
                     int mode,
                     int bias_correction,
                     float decay,
                     at::Tensor& w_l2_i,
                     at::Tensor& u_l2_i,
                     at::Tensor& lamb_coeff_val);
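
// The kernel fuses the Adam moment updates with LAMB's layer-wise trust-ratio
// scaling (You et al., "Large Batch Optimization for Deep Learning", 2019).
// Schematically, per step (the exact formula, including bias correction and
// the meaning of `mode`, lives in the companion .cu kernel):
//
//   m = beta1 * m + (1 - beta1) * g
//   v = beta2 * v + (1 - beta2) * g * g
//   u = m / (sqrt(v) + eps) + decay * p
//   p -= lr * clamp(||p|| / ||u||, min_coeff, max_coeff) * u
//
// w_l2_i and u_l2_i are scratch buffers for the ||p|| and ||u|| L2 reductions;
// lamb_coeff_val receives the clamped trust ratio that was applied.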

#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) \
    CHECK_CUDA(x);     \
    CHECK_CONTIGUOUS(x)

// C++ interface
at::Tensor lamb(at::Tensor& p,
                at::Tensor& p_copy,
                at::Tensor& m,
                at::Tensor& v,
                at::Tensor& g,
                float lr,
                float beta1,
                float beta2,
                float max_coeff,
                float min_coeff,
                float eps,
                float grad_scale,
                int step,
                int mode,
                int bias_correction,
                float decay)
{
    CHECK_INPUT(p);
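    // An empty p_copy means no separate (e.g. fp16) parameter copy is maintained.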
    if (p_copy.numel() > 0) CHECK_INPUT(p_copy);
    CHECK_INPUT(m);
    CHECK_INPUT(v);
    CHECK_INPUT(g);
    int64_t num_elem = p.numel();
    AT_ASSERTM(m.numel() == num_elem, "number of elements in m and p tensors should be equal");
    AT_ASSERTM(v.numel() == num_elem, "number of elements in v and p tensors should be equal");
    AT_ASSERTM(g.numel() == num_elem, "number of elements in g and p tensors should be equal");
    AT_ASSERTM(
        p_copy.numel() == num_elem || p_copy.numel() == 0,
        "number of elements in p_copy and p tensors should be equal, or p_copy should be empty");

    // Reductions over half-precision parameters are accumulated in float.
    const auto reduce_options = p.options().dtype(
        p.scalar_type() == at::ScalarType::Half ? at::ScalarType::Float : p.scalar_type());

    // intermediate for the weight L2 reduction; make sure the kernel is launched
    // with at least 512 threads per block, otherwise the behaviour is undefined
    at::Tensor w_l2_i = at::empty({512}, reduce_options);

    // intermediate for the update L2 reduction (same launch constraint as above)
    at::Tensor u_l2_i = at::empty({512}, reduce_options);

    at::Tensor lamb_coeff_val = at::empty({1}, reduce_options);

    fused_lamb_cuda(p,
                    p_copy,
                    m,
                    v,
                    g,
                    lr,
                    beta1,
                    beta2,
                    max_coeff,
                    min_coeff,
                    eps,
                    grad_scale,
                    step,
                    mode,
                    bias_correction,
                    decay,
                    w_l2_i,
                    u_l2_i,
                    lamb_coeff_val);

    return lamb_coeff_val;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("lamb", &lamb, "Fused CUDA implementation of the LAMB optimizer (Adam with layer-wise trust-ratio scaling).");
}
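
// Illustrative usage from Python (a sketch, not part of this file; the JIT
// build below and the companion kernel filename are assumptions):
//
//   import torch
//   from torch.utils.cpp_extension import load
//   fused_lamb = load(name='fused_lamb',
//                     sources=['fused_lamb_cuda.cpp', 'fused_lamb_cuda_kernel.cu'])
//
//   # p, m, v, g: contiguous CUDA tensors with the same number of elements;
//   # an empty tensor for p_copy disables the separate parameter copy.
//   p_copy = torch.empty(0, device='cuda')
//   coeff = fused_lamb.lamb(p, p_copy, m, v, g,
//                           1e-3, 0.9, 0.999,  # lr, beta1, beta2
//                           10.0, 0.01, 1e-8,  # max_coeff, min_coeff, eps
//                           1.0, step, 0, 1,   # grad_scale, step, mode, bias_correction
//                           0.01)              # decay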