#pragma once

#include "llama.h"

#include "ggml-cpp.h"

#include <string>
#include <unordered_map>
#include <vector>

// TODO: pimpl

//
// llama_adapter_cvec
//

struct llama_adapter_cvec {
    struct ggml_tensor * tensor_for(int il) const;
19

20
21
22
23
24
25
26
27
28
29
30
31
    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int  il) const;

    int32_t apply(
            const llama_model & model,
            const float * data,
            size_t len,
            int32_t n_embd,
            int32_t il_start,
            int32_t il_end);

private:
    bool init(const llama_model & model);
32
33
34
35

    int32_t layer_start = -1;
    int32_t layer_end   = -1;

36
37
    std::vector<ggml_context_ptr> ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;
38

39
    std::vector<struct ggml_tensor *> tensors; // per layer
40
41
42
43
44
45
};

//
// llama_adapter_lora
//

struct llama_adapter_lora_weight {
47
48
49
    struct ggml_tensor * a = nullptr;
    struct ggml_tensor * b = nullptr;

50
51
52
53
54
55
56
57
58
    // get actual scale based on rank and alpha
    float get_scale(float alpha, float adapter_scale) const {
        const float rank  = (float) b->ne[0];
        const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
        return scale;
    }

    llama_adapter_lora_weight() = default;
    llama_adapter_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {}
59
60
};

struct llama_adapter_lora {
62
    // map tensor name to lora_a_b
63
    std::unordered_map<std::string, struct llama_adapter_lora_weight> ab_map;
64
65
66
67
68
69

    std::vector<ggml_context_ptr> ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    float alpha;

70
71
    llama_adapter_lora() = default;
    ~llama_adapter_lora() = default;
72

73
    llama_adapter_lora_weight * get_weight(struct ggml_tensor * w);
74
};