// MIT License

// Copyright (c) 2023 go-skynet authors

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:

// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

#include "common.h"
#include "llama.h"

#include "binding.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime> // time(), used for seeding
#include <fstream>
#include <iostream>
#include <random> // std::mt19937
#include <regex>
#include <sstream>
#include <string>
#include <vector>
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <signal.h>
#include <windows.h>
#endif

#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || \
    defined(_WIN32)
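// Exit with status 130 (128 + SIGINT), the conventional exit code for a
// process terminated by an interrupt.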
void sigint_handler(int signo) {
  if (signo == SIGINT) {
    _exit(130);
  }
}
#endif

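// Computes the embedding vector for params->prompt and writes
// llama_n_embd(ctx) floats into res_embeddings, which the caller is expected
// to have sized accordingly. Returns 0 on success, 1 on failure.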
int get_embeddings(void *params_ptr, void *state_pr, float *res_embeddings) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;
  gpt_params params = *params_p;

  if (params.seed <= 0) {
    params.seed = time(NULL);
  }

  std::mt19937 rng(params.seed);

  llama_init_backend(params.numa);

  int n_past = 0;

  // Add a space in front of the first character to match OG llama tokenizer
  // behavior
  params.prompt.insert(0, 1, ' ');

  // tokenize the prompt
  auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

  if (embd_inp.size() > 0) {
    if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past,
                   params.n_threads)) {
      fprintf(stderr, "%s : failed to eval\n", __func__);
      return 1;
    }
  }

  const int n_embd = llama_n_embd(ctx);

  const auto embeddings = llama_get_embeddings(ctx);
  if (embeddings == NULL) {
    fprintf(stderr,
            "%s : failed to get embeddings (was the context created with "
            "embedding support enabled?)\n",
            __func__);
    return 1;
  }

  for (int i = 0; i < n_embd; i++) {
    res_embeddings[i] = embeddings[i];
  }

  return 0;
}

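// Converts a sequence of token ids back to text, appends it to the prompt in
// params, and delegates to get_embeddings for the combined text.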
int get_token_embeddings(void *params_ptr, void *state_pr, int *tokens,
                         int tokenSize, float *res_embeddings) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;
  gpt_params params = *params_p;

  for (int i = 0; i < tokenSize; i++) {
    auto token_str = llama_token_to_str(ctx, tokens[i]);
    if (token_str == nullptr) {
      continue;
    }
    // append the token's text to the prompt that will be embedded
    params_p->prompt += token_str;
  }

  return get_embeddings(params_ptr, state_pr, res_embeddings);
}

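// Tokenizes `text` and evaluates it from position 0. Returns llama_eval's
// status: 0 on success, non-zero on failure.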
int eval(void *params_ptr, void *state_pr, char *text) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;

  auto n_past = 0;

  auto tokens = std::vector<llama_token>(params_p->n_ctx);
  auto n_prompt_tokens =
      llama_tokenize(ctx, text, tokens.data(), tokens.size(), true);

  if (n_prompt_tokens < 1) {
    fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
    return 1;
  }

  // evaluate prompt
  return llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past,
                    params_p->n_threads);
}

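// Runs the full generation loop: optionally restores a prompt cache (session
// file), evaluates the prompt, samples tokens until the prediction budget is
// exhausted or a stop condition triggers, and copies the generated text into
// `result`. Returns 0 on success, 1 on error.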
int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;

  gpt_params params = *params_p;

  const int n_ctx = llama_n_ctx(ctx);

  if (params.seed <= 0) {
    params.seed = time(NULL);
  }

  std::mt19937 rng(params.seed);

  std::string path_session = params.path_prompt_cache;
  std::vector<llama_token> session_tokens;

  if (!path_session.empty()) {
    if (debug) {
      fprintf(stderr, "%s: attempting to load saved session from '%s'\n",
              __func__, path_session.c_str());
    }
    // fopen to check for existing session
    FILE *fp = std::fopen(path_session.c_str(), "rb");
    if (fp != NULL) {
      std::fclose(fp);

      session_tokens.resize(n_ctx);
      size_t n_token_count_out = 0;
      if (!llama_load_session_file(
              ctx, path_session.c_str(), session_tokens.data(),
              session_tokens.capacity(), &n_token_count_out)) {
        fprintf(stderr, "%s: error: failed to load session file '%s'\n",
                __func__, path_session.c_str());
        return 1;
      }
      session_tokens.resize(n_token_count_out);
      llama_set_rng_seed(ctx, params.seed);
      if (debug) {
        fprintf(stderr, "%s: loaded a session with prompt size of %d tokens\n",
                __func__, (int)session_tokens.size());
      }
    } else {
      if (debug) {
        fprintf(stderr, "%s: session file does not exist, will create\n",
                __func__);
      }
    }
  }

  std::vector<llama_token> embd_inp;
  if (!params.prompt.empty() || session_tokens.empty()) {
    // Add a space in front of the first character to match OG llama tokenizer
    // behavior
    params.prompt.insert(0, 1, ' ');

    embd_inp = ::llama_tokenize(ctx, params.prompt, true);
  } else {
    embd_inp = session_tokens;
  }

  // debug message about similarity of saved session, if applicable
  size_t n_matching_session_tokens = 0;
  if (session_tokens.size()) {
    for (llama_token id : session_tokens) {
      if (n_matching_session_tokens >= embd_inp.size() ||
          id != embd_inp[n_matching_session_tokens]) {
        break;
      }
      n_matching_session_tokens++;
    }
    if (debug) {
      if (params.prompt.empty() &&
          n_matching_session_tokens == embd_inp.size()) {
        fprintf(stderr, "%s: using full prompt from session file\n", __func__);
      } else if (n_matching_session_tokens >= embd_inp.size()) {
        fprintf(stderr, "%s: session file has exact match for prompt!\n",
                __func__);
      } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
        fprintf(stderr,
                "%s: warning: session file has low similarity to prompt (%zu / "
                "%zu tokens); will mostly be reevaluated\n",
                __func__, n_matching_session_tokens, embd_inp.size());
      } else {
        fprintf(stderr, "%s: session file matches %zu / %zu tokens of prompt\n",
                __func__, n_matching_session_tokens, embd_inp.size());
      }
    }
  }
  // if we will use the cache for the full prompt without reaching the end of
  // the cache, force reevaluation of the last token to recalculate the
  // cached logits
  if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() &&
      session_tokens.size() > embd_inp.size()) {
    session_tokens.resize(embd_inp.size() - 1);
  }
  // number of tokens to keep when resetting context
  if (params.n_keep < 0 || params.n_keep > (int)embd_inp.size()) {
    params.n_keep = (int)embd_inp.size();
  }

  // TODO: replace with ring-buffer
  std::vector<llama_token> last_n_tokens(n_ctx);
  std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

  bool need_to_save_session =
      !path_session.empty() && n_matching_session_tokens < embd_inp.size();
  int n_past = 0;
  int n_remain = params.n_predict;
  int n_consumed = 0;
  int n_session_consumed = 0;

  std::vector<llama_token> embd;
  std::string res = "";

  // do one empty run to warm up the model
  {
    const std::vector<llama_token> tmp = {
        llama_token_bos(),
    };
    llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
    llama_reset_timings(ctx);
  }

  while (n_remain != 0) {
    // predict
    if (embd.size() > 0) {
      // infinite text generation via context swapping
      // if we run out of context:
      // - take the n_keep first tokens from the original prompt (via n_past)
      // - take half of the last (n_ctx - n_keep) tokens and recompute the
      // logits in batches
      if (n_past + (int)embd.size() > n_ctx) {
        const int n_left = n_past - params.n_keep;

        // always keep the first token - BOS
        n_past = std::max(1, params.n_keep);

        // insert n_left/2 tokens at the start of embd from last_n_tokens
        embd.insert(embd.begin(),
                    last_n_tokens.begin() + n_ctx - n_left / 2 - embd.size(),
                    last_n_tokens.end() - embd.size());

        // stop saving session if we run out of context
        path_session.clear();

        // printf("\n---\n");
        // printf("resetting: '");
        // for (int i = 0; i < (int) embd.size(); i++) {
        //     printf("%s", llama_token_to_str(ctx, embd[i]));
        // }
        // printf("'\n");
        // printf("\n---\n");
      }

      // try to reuse a matching prefix from the loaded session instead of
      // re-eval (via n_past)
      if (n_session_consumed < (int)session_tokens.size()) {
        size_t i = 0;
        for (; i < embd.size(); i++) {
          if (embd[i] != session_tokens[n_session_consumed]) {
            session_tokens.resize(n_session_consumed);
            break;
          }

          n_past++;
          n_session_consumed++;

          if (n_session_consumed >= (int)session_tokens.size()) {
            ++i;
            break;
          }
        }
        if (i > 0) {
          embd.erase(embd.begin(), embd.begin() + i);
        }
      }

      // evaluate tokens in batches
      // embd is typically prepared beforehand to fit within a batch, but not
      // always
      for (int i = 0; i < (int)embd.size(); i += params.n_batch) {
        int n_eval = (int)embd.size() - i;
        if (n_eval > params.n_batch) {
          n_eval = params.n_batch;
        }
        if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
          fprintf(stderr, "%s : failed to eval\n", __func__);
          return 1;
        }
        n_past += n_eval;
      }

      if (embd.size() > 0 && !path_session.empty()) {
        session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
        n_session_consumed = session_tokens.size();
      }
    }

    embd.clear();

    if ((int)embd_inp.size() <= n_consumed) {
      // out of user input, sample next token
      const float temp = params.temp;
      const int32_t top_k =
          params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
      const float top_p = params.top_p;
      const float tfs_z = params.tfs_z;
      const float typical_p = params.typical_p;
      const int32_t repeat_last_n =
          params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
      const float repeat_penalty = params.repeat_penalty;
      const float alpha_presence = params.presence_penalty;
      const float alpha_frequency = params.frequency_penalty;
      const int mirostat = params.mirostat;
      const float mirostat_tau = params.mirostat_tau;
      const float mirostat_eta = params.mirostat_eta;
      const bool penalize_nl = params.penalize_nl;

      // optionally save the session on first sample (for faster prompt loading
      // next time)
      if (!path_session.empty() && need_to_save_session &&
          !params.prompt_cache_ro) {
        need_to_save_session = false;
        llama_save_session_file(ctx, path_session.c_str(),
                                session_tokens.data(), session_tokens.size());
      }

      llama_token id = 0;

      {
        auto logits = llama_get_logits(ctx);
        auto n_vocab = llama_n_vocab(ctx);

        // Apply params.logit_bias map
        for (auto it = params.logit_bias.begin(); it != params.logit_bias.end();
             it++) {
          logits[it->first] += it->second;
        }

        std::vector<llama_token_data> candidates;
        candidates.reserve(n_vocab);
        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
          candidates.emplace_back(
              llama_token_data{token_id, logits[token_id], 0.0f});
        }

        llama_token_data_array candidates_p = {candidates.data(),
                                               candidates.size(), false};

        // Apply penalties
        float nl_logit = logits[llama_token_nl()];
        auto last_n_repeat =
            std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
        llama_sample_repetition_penalty(
            ctx, &candidates_p,
            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
            last_n_repeat, repeat_penalty);
        llama_sample_frequency_and_presence_penalties(
            ctx, &candidates_p,
            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
            last_n_repeat, alpha_frequency, alpha_presence);
        if (!penalize_nl) {
          logits[llama_token_nl()] = nl_logit;
        }

        if (temp <= 0) {
          // Greedy sampling
          id = llama_sample_token_greedy(ctx, &candidates_p);
        } else {
          if (mirostat == 1) {
            static float mirostat_mu = 2.0f * mirostat_tau;
            const int mirostat_m = 100;
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau,
                                             mirostat_eta, mirostat_m,
                                             &mirostat_mu);
          } else if (mirostat == 2) {
            static float mirostat_mu = 2.0f * mirostat_tau;
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token_mirostat_v2(
                ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
          } else {
            // default sampling chain: narrow the candidates with top-k,
            // tail-free, typical and top-p filtering, then apply temperature
            // and draw a token
            llama_sample_top_k(ctx, &candidates_p, top_k, 1);
            llama_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
            llama_sample_typical(ctx, &candidates_p, typical_p, 1);
            llama_sample_top_p(ctx, &candidates_p, top_p, 1);
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token(ctx, &candidates_p);
          }
        }
        // printf("`%d`", candidates_p.size);

        last_n_tokens.erase(last_n_tokens.begin());
        last_n_tokens.push_back(id);
      }

      // add it to the context
      embd.push_back(id);

      // decrement remaining sampling budget
      --n_remain;

      // call the token callback; whether one is actually registered is
      // handled on the Go side, so no check is needed here.
      auto token_str = llama_token_to_str(ctx, id);
      if (!tokenCallback(state_pr, (char *)token_str)) {
        break;
      }
    } else {
      // some user input remains from prompt or interaction, forward it to
      // processing
      while ((int)embd_inp.size() > n_consumed) {
        embd.push_back(embd_inp[n_consumed]);
        last_n_tokens.erase(last_n_tokens.begin());
        last_n_tokens.push_back(embd_inp[n_consumed]);
        ++n_consumed;
        if ((int)embd.size() >= params.n_batch) {
          break;
        }
      }
    }

    for (auto id : embd) {
      res += llama_token_to_str(ctx, id);
    }

    // check for stop prompt
    if (params.antiprompt.size()) {
      std::string last_output;
      for (auto id : last_n_tokens) {
        last_output += llama_token_to_str(ctx, id);
      }
      // Check if each of the reverse prompts appears at the end of the output.
      for (std::string &antiprompt : params.antiprompt) {
        // size_t extra_padding = params.interactive ? 0 : 2;
        size_t extra_padding = 2;
        size_t search_start_pos =
            last_output.length() >
                    static_cast<size_t>(antiprompt.length() + extra_padding)
                ? last_output.length() -
                      static_cast<size_t>(antiprompt.length() + extra_padding)
                : 0;

        if (last_output.find(antiprompt.c_str(), search_start_pos) !=
            std::string::npos) {
          goto end;
        }
      }
    }

    // end of text token
    if (!embd.empty() && embd.back() == llama_token_eos()) {
      break;
    }
  }

  if (!path_session.empty() && params.prompt_cache_all &&
      !params.prompt_cache_ro) {
    if (debug) {
      fprintf(stderr, "\n%s: saving final output to session file '%s'\n",
              __func__, path_session.c_str());
    }
    llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(),
                            session_tokens.size());
  }

end:
#if defined(_WIN32)
  signal(SIGINT, SIG_DFL);
#endif

  if (debug) {
    llama_print_timings(ctx);
    llama_reset_timings(ctx);
  }

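  // `result` is a caller-provided buffer; it must be large enough to hold
  // the full generated text plus a terminating NUL.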
  strcpy(result, res.c_str());
  return 0;
}

void llama_binding_free_model(void *state_ptr) {
  llama_context *ctx = (llama_context *)state_ptr;
  llama_free(ctx);
}

void llama_free_params(void *params_ptr) {
  gpt_params *params = (gpt_params *)params_ptr;
  delete params;
}

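// Restores a previously saved llama state (rng, logits, embedding and
// kv_cache) from `statefile`; `modes` is forwarded to fopen and should
// normally be "rb".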
int load_state(void *ctx, char *statefile, char *modes) {
  llama_context *state = (llama_context *)ctx;
  const llama_context *constState = static_cast<const llama_context *>(state);
  const size_t state_size = llama_get_state_size(state);
  std::vector<uint8_t> state_mem(state_size);

  FILE *fp_read = fopen(statefile, modes);
  if (fp_read == NULL) {
    fprintf(stderr, "\n%s : failed to open state file '%s'\n", __func__,
            statefile);
    return 1;
  }

  if (state_size != llama_get_state_size(constState)) {
    fprintf(stderr, "\n%s : failed to validate state size\n", __func__);
    fclose(fp_read);
    return 1;
  }

  const size_t ret = fread(state_mem.data(), 1, state_size, fp_read);
  if (ret != state_size) {
    fprintf(stderr, "\n%s : failed to read state\n", __func__);
    fclose(fp_read);
    return 1;
  }

  llama_set_state_data(
      state, state_mem.data()); // could also read directly from a memory-mapped file
  fclose(fp_read);

  return 0;
}

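// Serializes the current llama state to `dst`; `modes` is forwarded to fopen
// and should normally be "wb".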
void save_state(void *ctx, char *dst, char *modes) {
  llama_context *state = (llama_context *)ctx;

  const size_t state_size = llama_get_state_size(state);
  std::vector<uint8_t> state_mem(state_size);

  // Save state (rng, logits, embedding and kv_cache) to file
  FILE *fp_write = fopen(dst, modes);
  if (fp_write == NULL) {
    fprintf(stderr, "\n%s : failed to open state file '%s'\n", __func__, dst);
    return;
  }
  llama_copy_state_data(
      state, state_mem.data()); // could also copy directly to a memory-mapped file
  fwrite(state_mem.data(), 1, state_size, fp_write);
  fclose(fp_write);
}

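// Builds a heap-allocated gpt_params from the flattened argument list passed
// across the binding. The caller owns the result and releases it with
// llama_free_params.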
void *llama_allocate_params(
    const char *prompt, int seed, int threads, int tokens, int top_k,
    float top_p, float temp, float repeat_penalty, int repeat_last_n,
    bool ignore_eos, bool memory_f16, int n_batch, int n_keep,
    const char **antiprompt, int antiprompt_count, float tfs_z, float typical_p,
    float frequency_penalty, float presence_penalty, int mirostat,
    float mirostat_eta, float mirostat_tau, bool penalize_nl,
    const char *logit_bias, bool mlock, bool mmap, const char *maingpu,
    const char *tensorsplit) {
  gpt_params *params = new gpt_params;
  params->seed = seed;
  params->n_threads = threads;
  params->n_predict = tokens;
  params->repeat_last_n = repeat_last_n;
  params->top_k = top_k;
  params->top_p = top_p;
  params->memory_f16 = memory_f16;
  params->temp = temp;
  params->use_mmap = mmap;
  params->use_mlock = mlock;
  params->repeat_penalty = repeat_penalty;
  params->n_batch = n_batch;
  params->n_keep = n_keep;
  if (maingpu[0] != '\0') {
    params->main_gpu = std::stoi(maingpu);
  }

  if (tensorsplit[0] != '\0') {
    std::string arg_next = tensorsplit;
    // split string by , and /
    const std::regex regex{R"([,/]+)"};
    std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
    std::vector<std::string> split_arg{it, {}};
    GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

    for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
      if (i < split_arg.size()) {
        params->tensor_split[i] = std::stof(split_arg[i]);
      } else {
        params->tensor_split[i] = 0.0f;
      }
    }
  }

  if (ignore_eos) {
    params->logit_bias[llama_token_eos()] = -INFINITY;
  }

  for (int i = 0; i < antiprompt_count; i++) {
    params->antiprompt.push_back(antiprompt[i]);
  }

  params->tfs_z = tfs_z;
  params->typical_p = typical_p;
  params->presence_penalty = presence_penalty;
  params->mirostat = mirostat;
  params->mirostat_eta = mirostat_eta;
  params->mirostat_tau = mirostat_tau;
  params->penalize_nl = penalize_nl;
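  // logit_bias is expected to hold a single entry of the form
  // "<token_id> +<value>" or "<token_id> -<value>"; anything else is ignored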
  std::stringstream ss(logit_bias);
  llama_token key;
  char sign;
  std::string value_str;
  if (ss >> key && ss >> sign && std::getline(ss, value_str) &&
      (sign == '+' || sign == '-')) {
    params->logit_bias[key] =
        std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
  }
  params->frequency_penalty = frequency_penalty;
  params->prompt = prompt;

  return params;
}

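// Creates a llama_context for the model file at `fname`. Returns NULL if
// initialization fails; free the context with llama_binding_free_model.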
void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
                 bool mlock, bool embeddings, bool mmap, bool low_vram,
                 bool vocab_only, int n_gpu_layers, int n_batch,
                 const char *maingpu, const char *tensorsplit, bool numa) {
  // load the model
  auto lparams = llama_context_default_params();

  lparams.n_ctx = n_ctx;
  lparams.seed = n_seed;
  lparams.f16_kv = memory_f16;
  lparams.embedding = embeddings;
  lparams.use_mlock = mlock;
  lparams.n_gpu_layers = n_gpu_layers;
  lparams.use_mmap = mmap;
  lparams.low_vram = low_vram;
  lparams.vocab_only = vocab_only;

  if (maingpu[0] != '\0') {
    lparams.main_gpu = std::stoi(maingpu);
  }

  if (tensorsplit[0] != '\0') {
    std::string arg_next = tensorsplit;
    // split string by , and /
    const std::regex regex{R"([,/]+)"};
    std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
    std::vector<std::string> split_arg{it, {}};
    GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

    for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
      if (i < split_arg.size()) {
        lparams.tensor_split[i] = std::stof(split_arg[i]);
      } else {
        lparams.tensor_split[i] = 0.0f;
      }
    }
  }

  lparams.n_batch = n_batch;

  llama_init_backend(numa);
  void *res = nullptr;
  try {
    res = llama_init_from_file(fname, lparams);
  } catch (std::runtime_error &e) {
    fprintf(stderr, "failed %s", e.what());
    return res;
  }

  return res;
}