#pragma once

#include "llama-impl.h"
#include "llama-hparams.h"

#include "ggml-cpp.h"

#include <string>
#include <unordered_map>
#include <vector>

//
// llama_adapter_cvec
//

// TODO: rename to llama_adapter_cvec
struct llama_control_vector {
    std::vector<ggml_context_ptr> ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    std::vector<struct ggml_tensor *> tensors; // per layer

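    // the control vector is applied only to layers in [layer_start, layer_end]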
    int32_t layer_start = -1;
    int32_t layer_end   = -1;

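    // return the direction tensor for layer il, or nullptr if il is out of range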
    struct ggml_tensor * tensor_for(int il) const;

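    // add tensor_for(il) to cur in ctx; returns cur unchanged when there is none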
    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const;
};
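
// A minimal sketch, consistent with the definitions in llama-adapter.cpp, of
// how the two helpers combine while building the per-layer graph:
//
//     struct ggml_tensor * llama_control_vector::apply_to(
//             struct ggml_context * ctx, struct ggml_tensor * cur, int il) const {
//         ggml_tensor * layer_dir = tensor_for(il);
//         if (layer_dir != nullptr) {
//             cur = ggml_add(ctx, cur, layer_dir); // shift the hidden state
//         }
//         return cur;
//     }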

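// load a control vector into cvec: data is expected to hold n_embd floats per
// layer, applied to layers in [il_start, il_end]; passing data == nullptr
// disables the current vector; returns 0 on success, non-zero on error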
int32_t llama_control_vector_apply(
        struct llama_control_vector & cvec,
        const llama_model & model,
        const float * data,
        size_t len,
        int32_t n_embd,
        int32_t il_start,
        int32_t il_end);
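
// Example call (hypothetical buffer): load one n_embd-float direction per
// layer and apply it to layers 1..n_layer-1:
//
//     std::vector<float> data(n_layer * n_embd); // filled from a cvec file
//     llama_control_vector_apply(cvec, model, data.data(), data.size(),
//                                n_embd, 1, n_layer - 1);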

//
// llama_adapter_lora
//

// TODO: rename to llama_adapter_lora_weight
struct llama_lora_weight {
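    // low-rank pair: the weight delta is reconstructed from b and a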
    struct ggml_tensor * a = nullptr;
    struct ggml_tensor * b = nullptr;

    llama_lora_weight() = default;
    llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {}
};
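
// Note (per the LoRA formulation): the adapted product is computed as
// y = W*x + scale * (b * (a * x)), with scale derived from alpha and the
// rank of the decomposition; see the usage sketch at the end of this file.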

// TODO: rename to llama_adapter_lora
struct llama_lora_adapter {
    // map a base-model tensor name to its LoRA A/B weight pair
    std::unordered_map<std::string, struct llama_lora_weight> ab_map;

    std::vector<ggml_context_ptr> ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    // scaling factor read from the adapter file; the effective per-tensor
    // scale is typically alpha / rank
    float alpha = 0.0f;

    llama_lora_adapter() = default;
    ~llama_lora_adapter() = default;

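    // look up the A/B pair loaded for base weight w (matched by tensor name);
    // returns nullptr when this adapter does not touch w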
    llama_lora_weight * get_weight(struct ggml_tensor * w);
};
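
// A minimal usage sketch (hypothetical helper, consistent with llama.cpp's
// llm_build_lora_mm): wrap a base mat-mul so the adapter's low-rank delta is
// added whenever an A/B pair exists for the weight:
//
//     static ggml_tensor * lora_mm_sketch(
//             llama_lora_adapter & adapter, ggml_context * ctx,
//             ggml_tensor * w, ggml_tensor * cur) {
//         ggml_tensor * res = ggml_mul_mat(ctx, w, cur);   // base W*x
//         llama_lora_weight * lw = adapter.get_weight(w);
//         if (lw == nullptr) {
//             return res;                                  // weight not adapted
//         }
//         const float rank  = (float) lw->b->ne[0];
//         const float scale = adapter.alpha ? adapter.alpha / rank : 1.0f;
//         ggml_tensor * ab = ggml_mul_mat(ctx, lw->b,
//                                         ggml_mul_mat(ctx, lw->a, cur));
//         return ggml_add(ctx, res, ggml_scale(ctx, ab, scale)); // + scale*B*A*x
//     }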