#include "arg.h"
#include "common.h"
#include "llama.h"

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// #define GRIT_DEBUG
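
// encode: compute one embedding per sentence.
// Each sentence is prefixed with the formatted instruction, decoded with embedding
// output enabled and causal attention disabled, mean-pooled over the sentence tokens
// only (instruction tokens are skipped), and finally normalized with common_embd_normalize.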

static std::vector<std::vector<float>> encode(llama_context * ctx, const std::vector<std::string> & sentences, const std::string & instruction) {
    std::vector<std::vector<float>> result;

    const llama_model * model = llama_get_model(ctx);

    llama_batch batch = llama_batch_init(llama_n_batch(ctx), 0, 1);

    for (uint64_t i = 0; i < sentences.size(); i++) {
        common_batch_clear(batch);

        const std::string input_string = instruction + sentences[i];

        std::vector<llama_token> inputs = common_tokenize(model, input_string, true, false);

        const int32_t n_toks = inputs.size();

        // GritLM seems to have EOS = ""
        // https://github.com/ContextualAI/gritlm/blob/92025b16534712b31b3c4aaaf069350e222bd5f8/gritlm/gritlm.py#L18
        // inputs.push_back(llama_token_eos(model));

        // we want to ignore instruction tokens for mean pooling
        const int32_t n_inst = common_tokenize(model, instruction, true, false).size();

#ifdef GRIT_DEBUG
        // debug: print the tokens - they should match those shown in the GritLM reference sample
        std::for_each(inputs.begin(), inputs.end(), [&ctx](llama_token t) {
            std::printf("[%d:%s]", t, common_token_to_piece(ctx, t).c_str());
        });
        std::printf("\n");
#endif

        // add input to batch (this increments n_tokens)
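        // output is requested only for sentence tokens (j >= n_inst), keeping the
        // instruction prefix out of the mean pooling below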
        for (int32_t j = 0; j < n_toks; j++) {
            common_batch_add(batch, inputs[j], j, { 0 }, j >= n_inst);
        }

        // clear previous kv_cache values (irrelevant for embeddings)
        llama_kv_cache_clear(ctx);
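        // embedding pass: enable embedding output and use non-causal (bidirectional) attention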
        llama_set_embeddings(ctx, true);
        llama_set_causal_attn(ctx, false);

        // run model
        llama_decode(ctx, batch);

        // get embedding dimensions
        uint64_t n_embd = llama_n_embd(model);

        // allocate embedding output
        std::vector<float> emb_unorm(n_embd, 0.0f);

        // sum up all token embeddings
        for (int32_t k = n_inst; k < n_toks; k++) {
            float * emb = llama_get_embeddings_ith(ctx, k);
            for (uint64_t j = 0; j < n_embd; j++) {
                emb_unorm[j] += emb[j];
            }
        }

        // divide by number of tokens (mean pooling)
        {
            const uint64_t n_sent = n_toks - n_inst;

            for (uint64_t j = 0; j < n_embd; j++) {
                emb_unorm[j] /= n_sent;
            }
        }

        std::vector<float> emb_norm(emb_unorm.size());
        common_embd_normalize(emb_unorm.data(), emb_norm.data(), n_embd);
        result.push_back(emb_norm);

#ifdef GRIT_DEBUG
        // print out emb_norm
        std::printf("embedding %ld: ", i);
        for (uint64_t j = 0; j < n_embd; j++) {
            std::printf("%.5f ", emb_norm[j]);
        }
        std::printf("\n\n");
#endif
    }

    llama_batch_free(batch);

    return result;
}
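
// generate: token-by-token completion of `prompt` using the provided sampler.
// The context is switched back to causal attention with embedding output disabled,
// the prompt is decoded once, and then one sampled token at a time is fed back
// until the EOS token is produced; the result is optionally streamed to stdout.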

static std::string generate(llama_context * ctx, llama_sampler * smpl, const std::string & prompt, bool stream) {
    std::string result;

    const llama_model * model = llama_get_model(ctx);
    llama_token eos_token = llama_token_eos(model);

    llama_kv_cache_clear(ctx);
    llama_set_embeddings(ctx, false);
    llama_set_causal_attn(ctx, true);

    llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1);

    std::vector<llama_token> inputs = common_tokenize(model, prompt, false, true);
    int32_t i_current_token = 0;
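    // i_current_token tracks the absolute position in the context; after the first
    // pass, only the newly sampled token is appended each iteration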

    while (true) {
        common_batch_clear(bat);
        {
            const int32_t n_inputs = inputs.size();

            for (int32_t i = 0; i < n_inputs; i++) {
                common_batch_add(bat, inputs[i], i_current_token++, { 0 }, i == n_inputs - 1);
            }
        }
        inputs.clear();

        llama_decode(ctx, bat);

        llama_token token = llama_sampler_sample(smpl, ctx, bat.n_tokens - 1);

        if (token == eos_token) {
            break;
        }

        std::string piece = common_token_to_piece(ctx, token);
        if (stream) {
            std::printf("%s", piece.c_str());
            std::fflush(stdout);
        }

        inputs.push_back(token);

        result += piece;
    }

    if (stream) {
        std::printf("\n");
    }

    llama_batch_free(bat);

    return result;
}
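
// format the GritLM embedding prompt:
// "<|user|>\n{instruction}\n<|embed|>\n", or just "<|embed|>\n" if the instruction is empty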

static std::string gritlm_instruction(const std::string & instruction) {
    return !instruction.empty() ? "<|user|>\n" + instruction + "\n<|embed|>\n" : "<|embed|>\n";
}

int main(int argc, char * argv[]) {
    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
        return 1;
    }

    common_init();

    llama_model_params mparams = common_model_params_to_llama(params);
    llama_context_params cparams = common_context_params_to_llama(params);

    llama_backend_init();

    llama_model * model = llama_load_model_from_file(params.model.c_str(), mparams);

    // create the context (reused for both embedding and generation; each helper switches the mode it needs)
    llama_context * ctx = llama_new_context_with_model(model, cparams);

    auto sparams = llama_sampler_chain_default_params();

    sparams.no_perf = false;

    llama_sampler * smpl = llama_sampler_chain_init(sparams);

    llama_sampler_chain_add(smpl, llama_sampler_init_greedy());
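    // greedy decoding keeps the demo deterministic; other samplers could be chained here instead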

    // ### Embedding/Representation ###
    // samples taken from: https://github.com/ContextualAI/gritlm#basic
    {
        const std::string instruction = "Given a scientific paper title, retrieve the paper's abstract";

        const std::vector<std::string> queries = {
            "Bitcoin: A Peer-to-Peer Electronic Cash System",
            "Generative Representational Instruction Tuning",
        };

        const std::vector<std::string> documents = {
            "A purely peer-to-peer version of electronic cash would allow online payments to be sent directly from one party to another without going through a financial institution. Digital signatures provide part of the solution, but the main benefits are lost if a trusted third party is still required to prevent double-spending. We propose a solution to the double-spending problem using a peer-to-peer network. The network timestamps transactions by hashing them into an ongoing chain of hash-based proof-of-work, forming a record that cannot be changed without redoing the proof-of-work. The longest chain not only serves as proof of the sequence of events witnessed, but proof that it came from the largest pool of CPU power. As long as a majority of CPU power is controlled by nodes that are not cooperating to attack the network, they'll generate the longest chain and outpace attackers. The network itself requires minimal structure. Messages are broadcast on a best effort basis, and nodes can leave and rejoin the network at will, accepting the longest proof-of-work chain as proof of what happened while they were gone.",
            "All text-based language problems can be reduced to either generation or embedding. Current models only perform well at one or the other. We introduce generative representational instruction tuning (GRIT) whereby a large language model is trained to handle both generative and embedding tasks by distinguishing between them through instructions. Compared to other open models, our resulting GritLM 7B sets a new state of the art on the Massive Text Embedding Benchmark (MTEB) and outperforms all models up to its size on a range of generative tasks. By scaling up further, GritLM 8X7B outperforms all open generative language models that we tried while still being among the best embedding models. Notably, we find that GRIT matches training on only generative or embedding data, thus we can unify both at no performance loss. Among other benefits, the unification via GRIT speeds up Retrieval-Augmented Generation (RAG) by > 60% for long documents, by no longer requiring separate retrieval and generation models. Models, code, etc. are freely available at https://github.com/ContextualAI/gritlm.",
        };

        // No need to add instruction for retrieval documents
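        // (the queries, in contrast, are embedded together with the retrieval instruction)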
        const std::vector<std::vector<float>> d_rep = encode(ctx, documents, gritlm_instruction(""));
        const std::vector<std::vector<float>> q_rep = encode(ctx, queries,   gritlm_instruction(instruction));

        const int n_embd = llama_n_embd(model);

        const float cosine_sim_q0_d0 = common_embd_similarity_cos(q_rep[0].data(), d_rep[0].data(), n_embd);
        const float cosine_sim_q0_d1 = common_embd_similarity_cos(q_rep[0].data(), d_rep[1].data(), n_embd);
        const float cosine_sim_q1_d0 = common_embd_similarity_cos(q_rep[1].data(), d_rep[0].data(), n_embd);
        const float cosine_sim_q1_d1 = common_embd_similarity_cos(q_rep[1].data(), d_rep[1].data(), n_embd);

        std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[0].c_str(), documents[0].c_str(), cosine_sim_q0_d0);
        std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[0].c_str(), documents[1].c_str(), cosine_sim_q0_d1);
        std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[1].c_str(), documents[0].c_str(), cosine_sim_q1_d0);
        std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[1].c_str(), documents[1].c_str(), cosine_sim_q1_d1);
    }

    // ### Generation ###
    // GritLM models are not fine-tuned with system prompts; system-like instructions can simply be included together with the user instruction
    {
        const std::string prompt = "<|user|>\nPlease write me a poem about my recent hike of Mt. Fuji at midnight in the style of Shakespeare.\n<|assistant|>\n";
        std::string response = generate(ctx, smpl, prompt, true);
    }

    llama_sampler_free(smpl);
    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();

    return 0;
}