#pragma once

#include "llama-kv-cache.h"

#include <cstdint>
#include <map>
#include <memory>
#include <vector>

//
// llama_kv_cache_iswa
//

// utilizes two instances of llama_kv_cache
//   the first instance is for the non-SWA layers of the model and the second instance is for the
//   SWA (sliding-window attention) layers
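//
// a rough sketch of the intended split (assuming filter callbacks along these lines are
// provided by the caller; hparams.is_swa(il) is the per-layer classification helper):
//
//   kv_base : layers for which hparams.is_swa(il) == false  (full attention)
//   kv_swa  : layers for which hparams.is_swa(il) == true   (sliding-window attention)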

class llama_kv_cache_iswa : public llama_memory_i {
public:
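    // note: informal summary of the constructor parameters, inferred from their use
    //       elsewhere in llama.cpp (not authoritative documentation):
    //   type_k, type_v - data types of the K and V cache tensors
    //   v_trans        - whether the V tensors are stored transposed
    //   offload        - whether to offload the cache buffers to the backend devices
    //   swa_full       - allocate the SWA cache at full size instead of the reduced window size
    //   unified        - whether all sequences share a single cache buffer
    //   kv_size        - size of the base (non-SWA) cache, in cells
    //   n_seq_max      - maximum number of tracked sequences
    //   n_ubatch       - maximum ubatch size (factors into the minimum SWA cache size)
    //   n_pad          - padding applied to the cache size (e.g. for alignment)
    //   filter, reuse  - per-layer callbacks selecting which layers each cache manages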
    llama_kv_cache_iswa(
            const llama_model & model,
                    ggml_type   type_k,
                    ggml_type   type_v,
                         bool   v_trans,
                         bool   offload,
                         bool   swa_full,
                         bool   unified,
                     uint32_t   kv_size,
                     uint32_t   n_seq_max,
                     uint32_t   n_ubatch,
                     uint32_t   n_pad,
        const layer_filter_cb & filter,
        const  layer_reuse_cb & reuse);

    ~llama_kv_cache_iswa() = default;

    //
    // llama_memory_i
    //

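    // split the input batch into ubatches and find slots for them in both caches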
    llama_memory_context_ptr init_batch(
            llama_batch_allocr & balloc,
            uint32_t n_ubatch,
            bool embd_all) override;

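    // simulate a full cache, used for allocating worst-case compute buffers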
    llama_memory_context_ptr init_full() override;

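    // prepare any pending cache updates (e.g. position shifts) for both caches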
    llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;

    bool get_can_shift() const override;

    void clear(bool data) override;

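    // note: the sequence operations below are forwarded to both underlying caches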
    bool seq_rm  (llama_seq_id seq_id,                              llama_pos p0, llama_pos p1) override;
    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
    void seq_keep(llama_seq_id seq_id)                                                          override;
    void seq_add (llama_seq_id seq_id,                              llama_pos p0, llama_pos p1, llama_pos shift) override;
    void seq_div (llama_seq_id seq_id,                              llama_pos p0, llama_pos p1, int d) override;

    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
    llama_pos seq_pos_max(llama_seq_id seq_id) const override;

    std::map<ggml_backend_buffer_type_t, size_t> memory_breakdown() const override;

    // state write/load

    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1, llama_state_seq_flags flags = 0) const override;
    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1, llama_state_seq_flags flags = 0) override;

    //
    // llama_kv_cache_iswa specific API
    //

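    // direct access to the underlying caches (base: non-SWA layers, swa: SWA layers)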
    llama_kv_cache * get_base() const;
    llama_kv_cache * get_swa () const;

private:
    const llama_hparams & hparams;

    const bool unified;

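    // the cache for the non-SWA layers and the cache for the SWA layers, respectively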
    std::unique_ptr<llama_kv_cache> kv_base;
    std::unique_ptr<llama_kv_cache> kv_swa;
};

class llama_kv_cache_iswa_context : public llama_memory_context_i {
public:
    using slot_info_vec_t = llama_kv_cache::slot_info_vec_t;

    // used for errors
    llama_kv_cache_iswa_context(llama_memory_status status);

    // used to create a full-cache context
    llama_kv_cache_iswa_context(
            llama_kv_cache_iswa * kv);

    // used to create an update context
    llama_kv_cache_iswa_context(
            llama_kv_cache_iswa * kv,
            llama_context * lctx,
            bool optimize);

    // used to create a context for processing a batch (from prepared slot infos and ubatches)
    llama_kv_cache_iswa_context(
            llama_kv_cache_iswa * kv,
            slot_info_vec_t sinfos_base,
            slot_info_vec_t sinfos_swa,
            std::vector<llama_ubatch> ubatches);

    virtual ~llama_kv_cache_iswa_context();

    //
    // llama_memory_context_i
    //

    bool next()  override;
    bool apply() override;

    llama_memory_status  get_status() const override;
    const llama_ubatch & get_ubatch() const override;

    //
    // llama_kv_cache_iswa_context specific API
    //

    const llama_kv_cache_context * get_base() const;
    const llama_kv_cache_context * get_swa()  const;

private:
    //llama_kv_cache_iswa * kv;

    // the index of the next ubatch to process
    size_t i_next = 0;

    std::vector<llama_ubatch> ubatches;

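    // the contexts of the two underlying caches, advanced and applied in lockstep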
    const llama_memory_context_ptr ctx_base;
    const llama_memory_context_ptr ctx_swa;

    const llama_memory_status status;
};
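
// Illustrative usage sketch (not part of this header's API; `mem` is assumed to point
// to a llama_kv_cache_iswa, and `balloc`, `n_ubatch`, `embd_all` come from the caller):
//
//   llama_memory_context_ptr mctx = mem->init_batch(balloc, n_ubatch, embd_all);
//
//   if (mctx && mctx->get_status() == LLAMA_MEMORY_STATUS_SUCCESS) {
//       do {
//           mctx->apply(); // place the current ubatch into both the base and the SWA cache
//
//           const llama_ubatch & ubatch = mctx->get_ubatch();
//           // ... build and evaluate the compute graph for ubatch ...
//       } while (mctx->next()); // advance both sub-contexts to the next ubatch
//   }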