#include "arg.h"
#include "common.h"
#include "log.h"
#include "llama.h"

#include <ctime>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
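
// Example invocation (illustrative only; the binary name, flags, model path and prompt
// below are assumptions for demonstration, not taken from this file):
//
//   ./llama-embedding -m model.gguf -p "first line\nsecond line"
//
// each separator-delimited line of the prompt is embedded as its own sequence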

static std::vector<std::string> split_lines(const std::string & s, const std::string & separator = "\n") {
    std::vector<std::string> lines;
    size_t start = 0;
    size_t end = s.find(separator);

    while (end != std::string::npos) {
        lines.push_back(s.substr(start, end - start));
        start = end + separator.length();
        end = s.find(separator, start);
    }

    lines.push_back(s.substr(start)); // Add the last part

    return lines;
}

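// add all tokens of a single prompt to the batch as sequence `seq_id`,
// requesting an output embedding for every token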
static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
    size_t n_tokens = tokens.size();
    for (size_t i = 0; i < n_tokens; i++) {
        common_batch_add(batch, tokens[i], i, { seq_id }, true);
    }
}

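// run the model on the accumulated batch and write normalized embeddings into `output`:
// one vector per token when the pooling type is NONE, otherwise one vector per sequence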
static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd, int embd_norm) {
    const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
    const struct llama_model * model = llama_get_model(ctx);

    // clear previous kv_cache values (irrelevant for embeddings)
    llama_kv_cache_clear(ctx);

    // run model
    LOG_INF("%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
    if (llama_model_has_encoder(model) && !llama_model_has_decoder(model)) {
        // encoder-only model
        if (llama_encode(ctx, batch) < 0) {
            LOG_ERR("%s : failed to encode\n", __func__);
        }
    } else if (!llama_model_has_encoder(model) && llama_model_has_decoder(model)) {
        // decoder-only model
        if (llama_decode(ctx, batch) < 0) {
            LOG_ERR("%s : failed to decode\n", __func__);
        }
    }

    for (int i = 0; i < batch.n_tokens; i++) {
        if (!batch.logits[i]) {
            continue;
        }

        const float * embd = nullptr;
        int embd_pos = 0;

        if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
            // try to get token embeddings
            embd = llama_get_embeddings_ith(ctx, i);
            embd_pos = i;
            GGML_ASSERT(embd != NULL && "failed to get token embeddings");
        } else {
            // try to get sequence embeddings - supported only when pooling_type is not NONE
            embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
            embd_pos = batch.seq_id[i][0];
            GGML_ASSERT(embd != NULL && "failed to get sequence embeddings");
        }

        float * out = output + embd_pos * n_embd;
        common_embd_normalize(embd, out, n_embd, embd_norm);
    }
}

int main(int argc, char ** argv) {
    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_EMBEDDING)) {
        return 1;
    }

    common_init();

    params.embedding = true;
    // For non-causal models, batch size must be equal to ubatch size
    params.n_ubatch = params.n_batch;

    llama_backend_init();
    llama_numa_init(params.numa);

    // load the model
    common_init_result llama_init = common_init_from_params(params);

    llama_model * model = llama_init.model;
    llama_context * ctx = llama_init.context;
    if (model == NULL) {
        LOG_ERR("%s: unable to load model\n", __func__);
        return 1;
    }

    const int n_ctx_train = llama_n_ctx_train(model);
    const int n_ctx = llama_n_ctx(ctx);

    const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);

    if (llama_model_has_encoder(model) && llama_model_has_decoder(model)) {
        LOG_ERR("%s: computing embeddings in encoder-decoder models is not supported\n", __func__);
        return 1;
    }

    if (n_ctx > n_ctx_train) {
        LOG_WRN("%s: warning: model was trained on only %d context tokens (%d specified)\n",
                __func__, n_ctx_train, n_ctx);
    }

    // print system information
    {
        LOG_INF("\n");
        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
    }

    // split the prompt into lines
    std::vector<std::string> prompts = split_lines(params.prompt, params.embd_sep);

    // max batch size
    const uint64_t n_batch = params.n_batch;
    GGML_ASSERT(params.n_batch >= params.n_ctx);

    // tokenize the prompts and trim
    std::vector<std::vector<int32_t>> inputs;
    for (const auto & prompt : prompts) {
        auto inp = common_tokenize(ctx, prompt, true, true);
        if (inp.size() > n_batch) {
            LOG_ERR("%s: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
                    __func__, (long long int) inp.size(), (long long int) n_batch);
            return 1;
        }
        inputs.push_back(inp);
    }

    // check if the last token is SEP
    // it should be automatically added by the tokenizer when 'tokenizer.ggml.add_eos_token' is set to 'true'
    for (auto & inp : inputs) {
        if (inp.empty() || inp.back() != llama_token_sep(model)) {
            LOG_WRN("%s: last token in the prompt is not SEP\n", __func__);
            LOG_WRN("%s: 'tokenizer.ggml.add_eos_token' should be set to 'true' in the GGUF header\n", __func__);
        }
    }

    // tokenization stats
    if (params.verbose_prompt) {
        for (int i = 0; i < (int) inputs.size(); i++) {
            LOG_INF("%s: prompt %d: '%s'\n", __func__, i, prompts[i].c_str());
            LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, inputs[i].size());
            for (int j = 0; j < (int) inputs[i].size(); j++) {
                LOG("%6d -> '%s'\n", inputs[i][j], common_token_to_piece(ctx, inputs[i][j]).c_str());
            }
            LOG("\n\n");
        }
    }

    // initialize batch
    const int n_prompts = prompts.size();
    struct llama_batch batch = llama_batch_init(n_batch, 0, 1);

    // count number of embeddings
    int n_embd_count = 0;
    if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
        for (int k = 0; k < n_prompts; k++) {
            n_embd_count += inputs[k].size();
        }
    } else {
        n_embd_count = n_prompts;
    }

    // allocate output
    const int n_embd = llama_n_embd(model);
    std::vector<float> embeddings(n_embd_count * n_embd, 0);
    float * emb = embeddings.data();

    // break into batches
    int e = 0; // number of embeddings already stored
    int s = 0; // number of prompts in current batch
    for (int k = 0; k < n_prompts; k++) {
        // clamp to n_batch tokens
        auto & inp = inputs[k];

        const uint64_t n_toks = inp.size();

        // encode if at capacity
        if (batch.n_tokens + n_toks > n_batch) {
            float * out = emb + e * n_embd;
            batch_decode(ctx, batch, out, s, n_embd, params.embd_normalize);
            e += pooling_type == LLAMA_POOLING_TYPE_NONE ? batch.n_tokens : s;
            s = 0;
            common_batch_clear(batch);
        }

        // add to batch
        batch_add_seq(batch, inp, s);
        s += 1;
    }

    // final batch
    float * out = emb + e * n_embd;
    batch_decode(ctx, batch, out, s, n_embd, params.embd_normalize);

    if (params.embd_out.empty()) {
        LOG("\n");

        if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
            for (int j = 0; j < n_embd_count; j++) {
                LOG("embedding %d: ", j);
                for (int i = 0; i < std::min(3, n_embd); i++) {
                    if (params.embd_normalize == 0) {
                        LOG("%6.0f ", emb[j * n_embd + i]);
                    } else {
                        LOG("%9.6f ", emb[j * n_embd + i]);
                    }
                }
                LOG(" ... ");
                for (int i = n_embd - 3; i < n_embd; i++) {
                    if (params.embd_normalize == 0) {
                        LOG("%6.0f ", emb[j * n_embd + i]);
                    } else {
                        LOG("%9.6f ", emb[j * n_embd + i]);
                    }
                }
                LOG("\n");
            }
        } else if (pooling_type == LLAMA_POOLING_TYPE_RANK) {
            for (int j = 0; j < n_embd_count; j++) {
                // NOTE: if you change this log - update the tests in ci/run.sh
                LOG("rerank score %d: %8.3f\n", j, emb[j * n_embd]);
            }
        } else {
            // print the first part of the embeddings or for a single prompt, the full embedding
            for (int j = 0; j < n_prompts; j++) {
                LOG("embedding %d: ", j);
                for (int i = 0; i < (n_prompts > 1 ? std::min(16, n_embd) : n_embd); i++) {
                    if (params.embd_normalize == 0) {
                        LOG("%6.0f ", emb[j * n_embd + i]);
                    } else {
                        LOG("%9.6f ", emb[j * n_embd + i]);
                    }
                }
                LOG("\n");
            }

            // print cosine similarity matrix
            if (n_prompts > 1) {
                LOG("\n");
                LOG("cosine similarity matrix:\n\n");
                for (int i = 0; i < n_prompts; i++) {
                    LOG("%6.6s ", prompts[i].c_str());
                }
                LOG("\n");
                for (int i = 0; i < n_prompts; i++) {
                    for (int j = 0; j < n_prompts; j++) {
                        float sim = common_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
                        LOG("%6.2f ", sim);
                    }
                    LOG("%1.10s", prompts[i].c_str());
                    LOG("\n");
                }
            }
        }
    }

    if (params.embd_out == "json" || params.embd_out == "json+" || params.embd_out == "array") {
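        // "json" and "json+" wrap the vectors in a JSON object, while "array" prints bare arrays;
        // "json+" additionally appends a cosine-similarity matrix over the computed embeddings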
        const bool notArray = params.embd_out != "array";

        LOG(notArray ? "{\n  \"object\": \"list\",\n  \"data\": [\n" : "[");
        for (int j = 0;;) { // at least one iteration (one prompt)
            if (notArray) LOG("    {\n      \"object\": \"embedding\",\n      \"index\": %d,\n      \"embedding\": ",j);
            LOG("[");
            for (int i = 0;;) { // at least one iteration (n_embd > 0)
                LOG(params.embd_normalize == 0 ? "%1.0f" : "%1.7f", emb[j * n_embd + i]);
                i++;
                if (i < n_embd) LOG(","); else break;
            }
            LOG(notArray ? "]\n    }" : "]");
            j++;
            if (j < n_embd_count) LOG(notArray ? ",\n" : ","); else break;
        }
        LOG(notArray ? "\n  ]" : "]\n");

        if (params.embd_out == "json+" && n_prompts > 1) {
            LOG(",\n  \"cosineSimilarity\": [\n");
            for (int i = 0;;) { // at least two iterations (n_embd_count > 1)
                LOG("    [");
                for (int j = 0;;) { // at least two iterations (n_embd_count > 1)
                    float sim = common_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
                    LOG("%6.2f", sim);
                    j++;
                    if (j < n_embd_count) LOG(", "); else break;
                }
                LOG(" ]");
                i++;
                if (i < n_embd_count) LOG(",\n"); else break;
            }
            LOG("\n  ]");
        }

        if (notArray) LOG("\n}\n");
    }

    LOG("\n");
    llama_perf_context_print(ctx);

    // clean up
    llama_batch_free(batch);
    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();

    return 0;
}