#pragma once

#include "llama.h"

#include "ggml-cpp.h"

#include <string>
#include <unordered_map>
#include <vector>

// TODO: pimpl

//
// llama_adapter_cvec
//

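// a control vector: one steering direction per layer, added to that layer's
// hidden state at inference time (see llama_apply_adapter_cvec in llama.h)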
struct llama_adapter_cvec {
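    // returns the control vector tensor for layer il, or nullptr if no
    // vector is set for that layer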
    ggml_tensor * tensor_for(int il) const;

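    // if a vector exists for layer il, returns cur with it added (op built in ctx);
    // otherwise returns cur unchanged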
    ggml_tensor * apply_to(ggml_context * ctx, ggml_tensor * cur, int  il) const;

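    // set the control vector from data (n_embd floats per layer, len floats total)
    // and apply it to layers in [il_start, il_end];
    // a nullptr data disables the current control vector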
    bool apply(
            const llama_model & model,
            const float * data,
            size_t len,
            int32_t n_embd,
            int32_t il_start,
            int32_t il_end);

private:
    bool init(const llama_model & model);

    int32_t layer_start = -1;
    int32_t layer_end   = -1;

    std::vector<ggml_context_ptr> ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    std::vector<ggml_tensor *> tensors; // per layer
};

//
// llama_adapter_lora
//

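// one LoRA weight pair: the low-rank factors for a single base tensor;
// at inference the product b*(a*x), scaled by get_scale(), is added to the
// output of the base weight's matmul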
struct llama_adapter_lora_weight {
    ggml_tensor * a = nullptr;
    ggml_tensor * b = nullptr;

    // get actual scale based on rank and alpha
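    // (scale = adapter_scale * alpha / rank, with rank taken from b->ne[0];
    //  e.g. alpha = 16, rank = 8, adapter_scale = 1.0f -> scale = 2.0f;
    //  alpha == 0 falls back to adapter_scale alone)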
    float get_scale(float alpha, float adapter_scale) const {
        const float rank  = (float) b->ne[0];
        const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
        return scale;
    }

    llama_adapter_lora_weight() = default;
    llama_adapter_lora_weight(ggml_tensor * a, ggml_tensor * b) : a(a), b(b) {}
};

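// a loaded LoRA adapter: all of its A/B weight pairs plus the metadata
// needed to apply them to a model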
struct llama_adapter_lora {
    // map base-model tensor name to its LoRA A/B weight pair
    std::unordered_map<std::string, llama_adapter_lora_weight> ab_map;

    std::vector<ggml_context_ptr> ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

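    // lora alpha read from the adapter's GGUF metadata;
    // passed to llama_adapter_lora_weight::get_scale()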
    float alpha;

    // gguf metadata
    std::unordered_map<std::string, std::string> gguf_kv;

    // activated lora (aLoRA)
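    // token sequence that marks where the adapter becomes active in the prompt
    // (empty for regular, always-on LoRA adapters)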
    std::vector<llama_token> alora_invocation_tokens;

    llama_adapter_lora() = default;
    ~llama_adapter_lora() = default;

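    // look up the A/B pair for base model tensor w;
    // returns nullptr if this adapter does not modify w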
    llama_adapter_lora_weight * get_weight(ggml_tensor * w);
};

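// active adapters for a context: maps each loaded adapter to its user-provided scale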
using llama_adapter_loras = std::unordered_map<llama_adapter_lora *, float>;