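// smart_schedule.cpp
// Host-side entry points for FMoE's "smart schedule", which pipelines
// expert computation with NCCL all-to-all communication. Compiled only
// when FMOE_USE_NCCL is defined.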
#ifdef FMOE_USE_NCCL

#include <cstdlib>
#include <vector>
#include <torch/extension.h>
#include <c10/cuda/CUDAGuard.h>

#include "smart_schedule.h"
#include "status.h"

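// Number of groups that a batch is split into for pipelining communication
// with computation. Set lazily from the FMOE_FASTER_GROUP_SIZE environment
// variable (default 4); -1 means "not yet initialized".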
long pipeline_gran = -1;

int smart_sch_enabled = 0;

int isSmartSchEnabled() {
    return smart_sch_enabled;
}
void setSmartSchEnabled(int s) {
    smart_sch_enabled = s;
}

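// Forward pass under smart scheduling: exchanges input rows with the other
// workers according to the local/global expert counts, invokes the experts
// through the given Python callbacks, and returns the local output together
// with the gathered global input buffer.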
std::vector<torch::Tensor> _smart_sch_forward(
        torch::Tensor input_buf,
        torch::Tensor local_expert_count,
        torch::Tensor global_expert_count,
        torch::Tensor stored_models,
        long global_batch_size,
        long expert_size,
        long n_workers,
        py::function forward_fn,
        py::function get_param_fn,
        py::function stash_fn,
        py::function pop_fn) {
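    // On the first call, read the pipeline granularity from the
    // FMOE_FASTER_GROUP_SIZE environment variable (default: 4) and mark
    // smart scheduling as enabled.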
    if (pipeline_gran == -1) {
        char* p = getenv("FMOE_FASTER_GROUP_SIZE");
        if (p) {
            pipeline_gran = atoi(p);
        } else {
            pipeline_gran = 4;
        }
        setSmartSchEnabled(1);
    }

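    // The per-device stream manager owns the NCCL communicator used for the
    // exchange below.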
    auto smgr = getCudaStreamManager(input_buf.device().index());
    int rank;
    NCCL_SAFE_CALL(ncclCommUserRank(smgr->ncclcomm, &rank));

    const auto num_expert = local_expert_count.size(0) / n_workers;
    const auto d_model = input_buf.size(1);

    // TODO: allocating with new_empty (uninitialized) may be faster than new_zeros.
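    // Exchange buffers: the global_* buffers hold rows received from, and
    // produced for, all workers; output_buf holds this worker's final output.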
    auto global_input_buf = input_buf.new_zeros({global_batch_size, d_model});
    auto global_output_buf = input_buf.new_zeros({global_batch_size, d_model});

    auto output_buf = input_buf.new_zeros({input_buf.size(0), d_model});

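    // Collect flattened parameter buffers for the experts marked in
    // stored_models. Only the owning rank (i / num_expert == rank) fills its
    // buffer via get_param_fn; all buffers are handed to the fused forward
    // below, which is expected to exchange them across workers.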
    std::vector<torch::Tensor> params;
    auto stored_models_ = stored_models.data_ptr<bool>();
    for (long i = 0; i < num_expert * n_workers; ++i) {
        if (stored_models_[i]) {
            torch::Tensor t = input_buf.new_empty({expert_size});
            if (i / num_expert == rank) {
                get_param_fn(t);
            }
            params.push_back(t);
        }
    }

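    // Dispatch on the input dtype and run the fused, pipelined forward.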
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input_buf.scalar_type(), 
            "fmoe_cuda_smart_sch_forward", ([&] {
        fmoe_cuda_fused_forward_impl(
            forward_fn,
            stash_fn,
            pop_fn,
            input_buf.device(),
            params,

            input_buf.data_ptr<scalar_t>(),
            global_input_buf.data_ptr<scalar_t>(),
            global_output_buf.data_ptr<scalar_t>(),
            output_buf.data_ptr<scalar_t>(),

            local_expert_count.data_ptr<long>(),
            global_expert_count.data_ptr<long>(),
            stored_models.data_ptr<bool>(),
            d_model, num_expert, rank, n_workers, expert_size,
            pipeline_gran, smgr);
    }));
    return {output_buf, global_input_buf};
}

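// Backward pass mirroring _smart_sch_forward: scatters grad_out to the
// workers that ran each expert, invokes the expert backward through the
// given Python callbacks, and gathers the gradients w.r.t. the input into
// grad_in.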
torch::Tensor _smart_sch_backward(
        torch::Tensor grad_out,
        torch::Tensor local_expert_count,
        torch::Tensor global_expert_count,
        torch::Tensor stored_models,
        long buf_batch_size,
        long global_batch_size,
        long expert_size,
        long n_workers,
        py::function backward_fn,
        py::function stash_fn,
        py::function pop_fn,
        py::function collect_fn,
        py::function set_grad_fn) {
    const auto num_expert = local_expert_count.size(0) / n_workers;
    auto smgr = getCudaStreamManager(grad_out.device().index());
    int rank;
    NCCL_SAFE_CALL(ncclCommUserRank(smgr->ncclcomm, &rank));
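
    // Gradient exchange buffers, mirroring the layout of the forward pass.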
    const auto d_model = grad_out.size(1);
    auto global_grad_out = grad_out.new_zeros({global_batch_size, d_model});
    auto global_grad_in = grad_out.new_zeros({global_batch_size, d_model});
    auto grad_in = grad_out.new_zeros({buf_batch_size, d_model});

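    // Dispatch on the gradient dtype and run the fused, pipelined backward.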
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_out.scalar_type(), 
            "fmoe_cuda_smartsch_backward", ([&] {
        fmoe_cuda_fused_backward_impl(
            backward_fn,
            grad_out.device(),

            grad_out.data_ptr<scalar_t>(),
            global_grad_out.data_ptr<scalar_t>(),
            global_grad_in.data_ptr<scalar_t>(),
            grad_in.data_ptr<scalar_t>(),

            local_expert_count.data_ptr<long>(),
            global_expert_count.data_ptr<long>(),
            stored_models.data_ptr<bool>(),
            d_model, num_expert, rank, n_workers,
            pipeline_gran, smgr);
    }));
    return grad_in;
}
#endif