#include "paged_compiler.hpp"

#include <spdlog/spdlog.h>

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

namespace {
// TODO: replace with Tensor::zeros once it is available.
// Zero-fills a device tensor by uploading a host-side buffer of zero bytes.
inline void set_zeros(infinicore::Tensor &tensor) {
    const auto byte_count = tensor->nbytes();
    const std::vector<uint8_t> host_zeros(byte_count, 0);
    // NOTE(review): `false` is presumably a non-blocking/async flag on the
    // H2D copy — confirm against memcpyH2D's signature.
    infinicore::context::memcpyH2D(tensor->data(), host_zeros.data(), byte_count, false);
}

} // namespace
namespace infinilm::engine {
// Builds the set of decode batch sizes to pre-compile: dense coverage for
// small batches, progressively coarser step sizes for larger ones, up to 512.
PagedCompiler::PagedCompiler(const std::shared_ptr<InfinilmModel> &model, RankBarrier *barrier)
    : GraphCompiler(model, barrier) {
    // Each bucket enumerates batch sizes first..last (inclusive) with the given step.
    struct Bucket {
        size_t first;
        size_t last;
        size_t step;
    };
    static constexpr Bucket kBuckets[] = {
        {1, 31, 1},     // every batch size up to 31
        {32, 63, 8},    // 32, 40, 48, 56
        {64, 127, 16},  // 64, 80, 96, 112
        {128, 255, 32}, // 128, 160, 192, 224
        {256, 512, 64}, // 256, 320, 384, 448, 512
    };
    for (const auto &bucket : kBuckets) {
        for (size_t batch = bucket.first; batch <= bucket.last; batch += bucket.step) {
            decode_batch_sizes_.push_back(batch);
        }
    }
}

void PagedCompiler::compile() {
    if (model_->get_cache_config() != nullptr && dynamic_cast<const cache::PagedKVCacheConfig *>(model_->get_cache_config())) {
        size_t nblocks = dynamic_cast<const cache::PagedKVCacheConfig *>(model_->get_cache_config())->num_blocks();
yaoht's avatar
yaoht committed
36
37
38
39
40

        // ///////////
        // 获取配置中的 block 大小(比如 16 或 32)
        int block_size = dynamic_cast<const cache::PagedKVCacheConfig *>(model_->get_cache_config())->block_size();
        /////////
41
42
43
        size_t max_batch_size = *std::max_element(decode_batch_sizes_.begin(), decode_batch_sizes_.end());
        compiled_map_decode_.clear();
        block_tables_holder_ = infinicore::Tensor::empty(
44
            {nblocks}, infinicore::DataType::I32, infinicore::context::getDevice());
45
        set_zeros(block_tables_holder_);
46
47
48
49
50
        for (size_t b : decode_batch_sizes_) {
            size_t block_per_req = nblocks / b;
            InfinilmModel::Input input;
            input.input_ids = infinicore::Tensor::empty({1, b}, infinicore::DataType::I64, infinicore::context::getDevice());
            input.position_ids = infinicore::Tensor::empty({b}, infinicore::DataType::I64, infinicore::context::getDevice());
51
            input.total_sequence_lengths = infinicore::Tensor::empty({b}, infinicore::DataType::I32, infinicore::context::getDevice());
52
53
54
            set_zeros(input.input_ids.value());
            set_zeros(input.position_ids.value());
            set_zeros(input.total_sequence_lengths.value());
55
56
            std::vector<int32_t> total_sequence_lengths_vec(b, 1);
            infinicore::context::memcpyH2D(input.total_sequence_lengths.value()->data(), total_sequence_lengths_vec.data(), b * sizeof(int32_t), false);
57
58
            input.input_offsets = infinicore::Tensor::empty({b + 1}, infinicore::DataType::I32, infinicore::context::getDevice());
            std::vector<int32_t> input_offsets_vec(b + 1, 0);
59
60
61
            for (size_t i = 0; i <= b; i++) {
                input_offsets_vec[i] = i;
            }
62
63
64
            infinicore::context::memcpyH2D(input.input_offsets.value()->data(), input_offsets_vec.data(), (b + 1) * sizeof(int32_t), false);
            input.cu_seqlens = infinicore::Tensor::empty({b + 1}, infinicore::DataType::I32, infinicore::context::getDevice());
            infinicore::context::memcpyH2D(input.cu_seqlens.value()->data(), input_offsets_vec.data(), (b + 1) * sizeof(int32_t), false);
65
66
            input.block_tables = block_tables_holder_->as_strided({b, block_per_req}, {(ptrdiff_t)block_per_req, 1});
            input.slot_mapping = infinicore::Tensor::empty({b}, infinicore::DataType::I64, infinicore::context::getDevice());
67
            set_zeros(input.slot_mapping.value());
68

yaoht's avatar
yaoht committed
69
70
71
72
73
74
75
76
77
            ////////////////
            // 从当前 dummy tensor 的 shape 中直接提取物理极限
            // 1. 对于 varlen,q 的最大长度就是 input_ids 的 token 总数 (这里是 1*b / b)
            // 如果你的 prefill 循环里 input_ids 分配的是 {seq_len, b},这行代码依然自适应生效
            input.max_seqlen_q = input.input_ids.value()->size(0); // 假设 shape 里的维 0 是 seq_len,维 1 是 batch

            // 2. max_seqlen_k 的绝对安全边界 = 当前分配的每请求 block 数 * 每个 block 的容量
            input.max_seqlen_k = block_per_req * block_size;
            /////////////
78
            barrier_->wait();
79
80
81
            infinicore::context::startGraphRecording();
            auto output = model_->forward(input);
            auto graph = infinicore::context::stopGraphRecording();
82
            barrier_->wait();
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110

            auto shared_output = std::shared_ptr<InfinilmModel::Output>(
                new InfinilmModel::Output{infinicore::graph::GraphTensor(output.logits)});

            compiled_map_decode_[b] = CompiledResult{std::move(input), std::make_tuple(graph, shared_output)};
        }
    }
}

// Looks up the pre-recorded graph for a pure-decode batch, refreshes its
// placeholder input tensors from `input`, and returns the graph together with
// a re-materialized output. Returns {nullptr, nullptr} when no compiled graph
// applies (non-paged cache, non-decode batch, or unknown batch size).
PagedCompiler::Compiled PagedCompiler::get_compiled(const InfinilmModel::Input &input) {
    // Only paged-KV-cache models have pre-recorded graphs.
    if (model_->get_cache_config() == nullptr
        || dynamic_cast<const cache::PagedKVCacheConfig *>(model_->get_cache_config()) == nullptr) {
        return {nullptr, nullptr};
    }

    const size_t batch_size = input.block_tables.value()->size(0);
    const size_t blocks_per_request = input.block_tables.value()->size(1);

    // Only decode-only batches are compiled: one token per request, so the
    // batch size must equal input_ids' second dimension.
    if (batch_size != input.input_ids.value()->size(1)) {
        return {nullptr, nullptr};
    }

    const auto it = compiled_map_decode_.find(batch_size);
    if (it == compiled_map_decode_.end()) {
        return {nullptr, nullptr};
    }

    auto &graph_input = it->second.input;

    // Refresh the captured graph's placeholder tensors with the live data.
    graph_input.input_ids.value()->copy_from(input.input_ids.value());
    graph_input.position_ids.value()->copy_from(input.position_ids.value());
    graph_input.total_sequence_lengths.value()->copy_from(input.total_sequence_lengths.value());
    graph_input.input_offsets.value()->copy_from(input.input_offsets.value());
    graph_input.cu_seqlens.value()->copy_from(input.cu_seqlens.value());
    // The compiled block table may be wider than the caller's; copy into its
    // leading columns only.
    graph_input.block_tables.value()->narrow({{1, 0, blocks_per_request}})->copy_from(input.block_tables.value());
    graph_input.slot_mapping.value()->copy_from(input.slot_mapping.value());

    auto graph = std::get<0>(it->second.compiled);
    auto shared_output = std::shared_ptr<InfinilmModel::Output>(
        new InfinilmModel::Output{std::get<1>(it->second.compiled)->logits->resume_from_blob_()});

    return std::make_tuple(graph, shared_output);
}

} // namespace infinilm::engine