llama.go 13.9 KB
Newer Older
1
package llm
Jeffrey Morgan's avatar
Jeffrey Morgan committed
2

Michael Yang's avatar
Michael Yang committed
3
/*
#cgo CFLAGS: -Ofast -std=c11 -fPIC
#cgo CPPFLAGS: -Ofast -Wall -Wextra -Wno-unused-function -Wno-unused-variable -DNDEBUG -DGGML_USE_K_QUANTS
#cgo CXXFLAGS: -std=c++11 -fPIC
#cgo darwin CPPFLAGS:  -DGGML_USE_ACCELERATE
#cgo darwin,arm64 CPPFLAGS: -DGGML_USE_METAL -DGGML_METAL_NDEBUG
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
#include <stdlib.h>
#include "llama.h"

// llama_sample_options bundles every sampling hyperparameter so llama_sample
// needs only a single options argument from the Go side.
struct llama_sample_options
{
	float repeat_penalty;
	float frequency_penalty;
	float presence_penalty;
	float temperature;
	int32_t top_k;
	float top_p;
	float tfs_z;
	float typical_p;
	int mirostat;
	float mirostat_tau;
	float mirostat_eta;
	bool penalize_newline;
};

// llama_sample selects the next token from the candidate logits. It applies
// the repetition and frequency/presence penalties, then dispatches to greedy
// decoding (temperature <= 0), mirostat v1, mirostat v2, or the standard
// top-k / tail-free / typical / top-p / temperature pipeline.
llama_token llama_sample(
		struct llama_context *ctx,
		struct llama_token_data *candidates,
		size_t n_candidates,
		const llama_token *last_tokens,
		size_t n_last_tokens,
		struct llama_sample_options *opts)
{
	llama_token_data_array candidates_p = {
		candidates,
		n_candidates,
		false,
	};

	// snapshot the newline candidate so it can be restored after the
	// penalties run when newline penalization is disabled
	struct llama_token_data newline = candidates_p.data[llama_token_nl()];

	llama_sample_repetition_penalty(
		ctx, &candidates_p,
		last_tokens, n_last_tokens,
		opts->repeat_penalty);

	llama_sample_frequency_and_presence_penalties(
		ctx, &candidates_p,
		last_tokens, n_last_tokens,
		opts->frequency_penalty, opts->presence_penalty);

	if (!opts->penalize_newline) {
		// undo any penalty applied to the newline token
		candidates_p.data[llama_token_nl()] = newline;
	}

	if (opts->temperature <= 0) {
		return llama_sample_token_greedy(ctx, &candidates_p);
	}

	if (opts->mirostat == 1) {
		int mirostat_m = 100;
		float mirostat_mu = 2.0f * opts->mirostat_tau;
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token_mirostat(
			ctx, &candidates_p,
			opts->mirostat_tau, opts->mirostat_eta,
			mirostat_m, &mirostat_mu);
	} else if (opts->mirostat == 2) {
		float mirostat_mu = 2.0f * opts->mirostat_tau;
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token_mirostat_v2(
			ctx, &candidates_p,
			opts->mirostat_tau, opts->mirostat_eta,
			&mirostat_mu);
	} else {
		llama_sample_top_k(ctx, &candidates_p, opts->top_k, 1);
		llama_sample_tail_free(ctx, &candidates_p, opts->tfs_z, 1);
		llama_sample_typical(ctx, &candidates_p, opts->typical_p, 1);
		llama_sample_top_p(ctx, &candidates_p, opts->top_p, 1);
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token(ctx, &candidates_p);
	}
}
*/
import "C"
89

Jeffrey Morgan's avatar
Jeffrey Morgan committed
90
import (
Michael Yang's avatar
Michael Yang committed
91
	"bytes"
Michael Yang's avatar
Michael Yang committed
92
	"embed"
Michael Yang's avatar
Michael Yang committed
93
	"errors"
94
	"fmt"
Michael Yang's avatar
Michael Yang committed
95
	"io"
Michael Yang's avatar
Michael Yang committed
96
	"log"
Michael Yang's avatar
Michael Yang committed
97
	"os"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
98
	"strings"
Michael Yang's avatar
Michael Yang committed
99
	"sync"
Michael Yang's avatar
Michael Yang committed
100
	"unicode/utf8"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
101
	"unsafe"
Michael Yang's avatar
Michael Yang committed
102
103

	"github.com/jmorganca/ollama/api"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
104
105
)

Michael Yang's avatar
Michael Yang committed
106
107
108
// The Metal shader source is embedded so it can be written out next to the
// binary at runtime for GPU acceleration on Apple silicon.
//go:embed ggml-metal.metal
var fs embed.FS

// ModelFamilyLlama identifies GGML models in the llama architecture family.
const ModelFamilyLlama ModelFamily = "llama"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
110

Michael Yang's avatar
Michael Yang committed
111
112
113
// llamaModel describes a llama-family GGML model; hyperparameters are
// populated from the model file header elsewhere in this package.
type llamaModel struct {
	hyperparameters llamaHyperparameters
}

// ModelFamily reports the architecture family, always llama for this type.
func (llm *llamaModel) ModelFamily() ModelFamily {
	return ModelFamilyLlama
}
Michael Yang's avatar
Michael Yang committed
118

Michael Yang's avatar
Michael Yang committed
119
func (llm *llamaModel) ModelType() ModelType {
Michael Yang's avatar
Michael Yang committed
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
	switch llm.hyperparameters.NumLayer {
	case 26:
		return ModelType3B
	case 32:
		return ModelType7B
	case 40:
		return ModelType13B
	case 60:
		return ModelType30B
	case 80:
		return ModelType65B
	}

	// TODO: find a better default
	return ModelType7B
Michael Yang's avatar
Michael Yang committed
135
136
137
138
}

func (llm *llamaModel) FileType() FileType {
	return llm.hyperparameters.FileType
Michael Yang's avatar
Michael Yang committed
139
}
Jeffrey Morgan's avatar
Jeffrey Morgan committed
140

141
142
143
144
145
146
147
148
149
150
151
152
// llamaHyperparameters holds the hyperparameter section of a llama GGML
// model. NOTE(review): the field order presumably mirrors the on-disk header
// layout used by the decoder — do not reorder without confirming.
type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType llamaFileType
}

// llamaFileType is the quantization/file-type id stored in a GGML header.
type llamaFileType uint32

// Known file-type ids. The explicit `iota + 2` on Q8_0 deliberately skips
// ids 5 and 6, which are not used here.
const (
	llamaFileTypeF32 llamaFileType = iota
	llamaFileTypeF16
	llamaFileTypeQ4_0
	llamaFileTypeQ4_1
	llamaFileTypeQ4_1_F16
	llamaFileTypeQ8_0 llamaFileType = iota + 2
	llamaFileTypeQ5_0
	llamaFileTypeQ5_1
	llamaFileTypeQ2_K
	llamaFileTypeQ3_K_S
	llamaFileTypeQ3_K_M
	llamaFileTypeQ3_K_L
	llamaFileTypeQ4_K_S
	llamaFileTypeQ4_K_M
	llamaFileTypeQ5_K_S
	llamaFileTypeQ5_K_M
	llamaFileTypeQ6_K
)

// llamaFileTypeNames maps each known file type to its display name.
var llamaFileTypeNames = map[llamaFileType]string{
	llamaFileTypeF32:      "F32",
	llamaFileTypeF16:      "F16",
	llamaFileTypeQ4_0:     "Q4_0",
	llamaFileTypeQ4_1:     "Q4_1",
	llamaFileTypeQ4_1_F16: "Q4_1_F16",
	llamaFileTypeQ8_0:     "Q8_0",
	llamaFileTypeQ5_0:     "Q5_0",
	llamaFileTypeQ5_1:     "Q5_1",
	llamaFileTypeQ2_K:     "Q2_K",
	llamaFileTypeQ3_K_S:   "Q3_K_S",
	llamaFileTypeQ3_K_M:   "Q3_K_M",
	llamaFileTypeQ3_K_L:   "Q3_K_L",
	llamaFileTypeQ4_K_S:   "Q4_K_S",
	llamaFileTypeQ4_K_M:   "Q4_K_M",
	llamaFileTypeQ5_K_S:   "Q5_K_S",
	llamaFileTypeQ5_K_M:   "Q5_K_M",
	llamaFileTypeQ6_K:     "Q6_K",
}

// String returns the human-readable name for ft, or "Unknown" for
// unrecognized values.
func (ft llamaFileType) String() string {
	if name, ok := llamaFileTypeNames[ft]; ok {
		return name
	}

	return "Unknown"
}

// llama wraps a loaded llama.cpp model and its inference context. It embeds
// api.Options so generation settings are available as fields. Contains a
// sync.Mutex, so values of this type must not be copied.
type llama struct {
	params *C.struct_llama_context_params
	model  *C.struct_llama_model
	ctx    *C.struct_llama_context

	// last holds recent tokens for repetition penalties; embd is the full
	// token stream; cursor is the index of the next token to evaluate.
	last   []C.llama_token
	embd   []C.llama_token
	cursor int

	// mu serializes access to the C context; gc signals that Close was
	// called and any running prediction should stop.
	mu sync.Mutex
	gc bool

	api.Options
}

236
func newLlama(model string, adapters []string, opts api.Options) (*llama, error) {
Michael Yang's avatar
Michael Yang committed
237
238
	if _, err := os.Stat(model); err != nil {
		return nil, err
Jeffrey Morgan's avatar
Jeffrey Morgan committed
239
240
	}

241
	llm := llama{Options: opts}
Michael Yang's avatar
Michael Yang committed
242

Michael Yang's avatar
Michael Yang committed
243
	C.llama_backend_init(C.bool(llm.UseNUMA))
Michael Yang's avatar
Michael Yang committed
244
245
246
247
248

	params := C.llama_context_default_params()
	params.seed = C.uint(llm.Seed)
	params.n_ctx = C.int(llm.NumCtx)
	params.n_batch = C.int(llm.NumBatch)
Michael Yang's avatar
Michael Yang committed
249
	params.n_gqa = C.int(llm.NumGQA)
Michael Yang's avatar
Michael Yang committed
250
251
252
253
254
255
256
257
258
	params.n_gpu_layers = C.int(llm.NumGPU)
	params.main_gpu = C.int(llm.MainGPU)
	params.low_vram = C.bool(llm.LowVRAM)
	params.f16_kv = C.bool(llm.F16KV)
	params.logits_all = C.bool(llm.LogitsAll)
	params.vocab_only = C.bool(llm.VocabOnly)
	params.use_mmap = C.bool(llm.UseMMap)
	params.use_mlock = C.bool(llm.UseMLock)
	params.embedding = C.bool(llm.EmbeddingOnly)
259
260
	params.rope_freq_base = C.float(llm.RopeFrequencyBase)
	params.rope_freq_scale = C.float(llm.RopeFrequencyScale)
261
262
263
264
265
266

	if len(adapters) > 0 && llm.UseMMap {
		log.Printf("must disable mmap to use lora adapters")
		params.use_mmap = C.bool(false)
	}

Michael Yang's avatar
Michael Yang committed
267
268
269
270
271
272
	llm.params = &params

	cModel := C.CString(model)
	defer C.free(unsafe.Pointer(cModel))

	llm.model = C.llama_load_model_from_file(cModel, params)
273
274
275
276
	if llm.model == nil {
		return nil, errors.New("failed to load model")
	}

Michael Yang's avatar
Michael Yang committed
277
	llm.ctx = C.llama_new_context_with_model(llm.model, params)
278
279
280
	if llm.ctx == nil {
		return nil, errors.New("failed to create context")
	}
Michael Yang's avatar
Michael Yang committed
281

282
283
284
285
286
287
288
289
290
	for _, adapter := range adapters {
		cAdapter := C.CString(adapter)
		defer C.free(unsafe.Pointer(cAdapter))

		if retval := C.llama_model_apply_lora_from_file(llm.model, cAdapter, nil, C.int(llm.NumThread)); retval != 0 {
			return nil, fmt.Errorf("failed to load adapter %s", adapter)
		}
	}

Michael Yang's avatar
Michael Yang committed
291
292
293
294
295
296
	// warm up the model
	bos := []C.llama_token{C.llama_token_bos()}
	C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
	C.llama_reset_timings(llm.ctx)

	return &llm, nil
Jeffrey Morgan's avatar
Jeffrey Morgan committed
297
298
}

299
func (llm *llama) Close() {
Michael Yang's avatar
Michael Yang committed
300
301
302
303
304
	llm.gc = true

	llm.mu.Lock()
	defer llm.mu.Unlock()

Michael Yang's avatar
Michael Yang committed
305
306
307
308
	defer C.llama_free_model(llm.model)
	defer C.llama_free(llm.ctx)

	C.llama_print_timings(llm.ctx)
Jeffrey Morgan's avatar
Jeffrey Morgan committed
309
310
}

311
312
313
314
// SetOptions replaces the generation options used by subsequent calls.
func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

// errNeedMoreData signals that the buffered output is a prefix of a stop
// sequence and generation must continue before the text can be emitted.
var errNeedMoreData = errors.New("need more data")

317
// Predict generates a streaming completion of prompt, resuming from the
// previous session tokens in ctx, and invokes fn for each decoded chunk of
// output. A final callback carries Done=true along with timing statistics
// and the full token context for continuing the session.
func (llm *llama) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
	C.llama_reset_timings(llm.ctx)

	// primes llm.embd/llm.last/llm.cursor for evaluation (return value unused)
	llm.marshalPrompt(ctx, prompt)

	C.llama_set_rng_seed(llm.ctx, C.uint(llm.Seed))

	var b bytes.Buffer
	for {
		token, err := llm.next()
		if llm.gc {
			// Close was called; abandon the prediction quietly
			return nil
		} else if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return err
		}

		b.WriteString(llm.Decode(int(token)))

		if err := llm.checkStopConditions(b); err != nil {
			if errors.Is(err, io.EOF) {
				break
			} else if errors.Is(err, errNeedMoreData) {
				continue
			}

			return err
		}

		// flush only complete UTF-8 sequences; hold back a partial rune
		// unless it can never complete (>= utf8.UTFMax bytes buffered)
		if utf8.Valid(b.Bytes()) || b.Len() >= utf8.UTFMax {
			fn(api.GenerateResponse{Response: b.String()})
			b.Reset()
		}
	}

	// hand the full token stream back so the caller can continue the session
	embd := make([]int, len(llm.embd))
	for i := range llm.embd {
		embd[i] = int(llm.embd[i])
	}

	timings := C.llama_get_timings(llm.ctx)
	fn(api.GenerateResponse{
		Done:               true,
		Context:            embd,
		SampleCount:        int(timings.n_sample),
		SampleDuration:     parseDurationMs(float64(timings.t_sample_ms)),
		PromptEvalCount:    int(timings.n_p_eval),
		PromptEvalDuration: parseDurationMs(float64(timings.t_p_eval_ms)),
		EvalCount:          int(timings.n_eval),
		EvalDuration:       parseDurationMs(float64(timings.t_eval_ms)),
	})

	return nil
}

373
func (llm *llama) checkStopConditions(b bytes.Buffer) error {
374
	for _, stopCondition := range llm.Stop {
375
		if stopCondition == strings.TrimSpace(b.String()) {
Michael Yang's avatar
Michael Yang committed
376
			return io.EOF
377
		} else if strings.HasPrefix(stopCondition, strings.TrimSpace(b.String())) {
Michael Yang's avatar
Michael Yang committed
378
379
380
381
382
383
384
			return errNeedMoreData
		}
	}

	return nil
}

385
// marshalPrompt tokenizes prompt, appends it to the prior session tokens in
// ctx, truncates the combined stream to fit the context window, and primes
// llm.embd, llm.last, and llm.cursor for the next evaluation. It returns the
// token stream that will be evaluated.
func (llm *llama) marshalPrompt(ctx []int, prompt string) []C.llama_token {
	tokens := append(ctx, llm.Encode(prompt)...)
	// a negative NumKeep means "keep the whole prompt"
	if llm.NumKeep < 0 {
		llm.NumKeep = len(tokens)
	}

	cTokens := make([]C.llama_token, len(tokens))
	for i := range tokens {
		cTokens[i] = C.llama_token(tokens[i])
	}

	// min(llm.NumCtx - 4, llm.NumKeep)
	if llm.NumCtx-4 < llm.NumKeep {
		llm.NumKeep = llm.NumCtx - 4
	}

	if len(tokens) >= llm.NumCtx {
		// truncate input: keep the first NumKeep tokens, then drop whole
		// numLeft-sized blocks from the middle so the tail fits
		numLeft := (llm.NumCtx - llm.NumKeep) / 2
		truncated := cTokens[:llm.NumKeep]
		erasedBlocks := (len(cTokens) - llm.NumKeep - numLeft - 1) / numLeft
		truncated = append(truncated, cTokens[llm.NumKeep+erasedBlocks*numLeft:]...)
		// refresh the repetition-penalty history with the newest NumCtx tokens
		copy(llm.last, cTokens[len(cTokens)-llm.NumCtx:])

		cTokens = truncated
		log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated))
	} else {
		// left-pad the history with zero tokens so it is NumCtx long
		llm.last = make([]C.llama_token, llm.NumCtx-len(cTokens))
		llm.last = append(llm.last, cTokens...)
	}

	// find the longest shared prefix with the previously evaluated stream so
	// the cached KV state can be reused from that point
	var i int
	for i = 0; i < len(llm.embd) && i < len(cTokens) && llm.embd[i] == cTokens[i]; i++ {
		// noop
	}

	llm.embd = cTokens
	if i == len(cTokens) {
		// evaluate at least one token to generate logits
		i--
	}

	llm.cursor = i

	log.Printf("prompt: num_past=%d cached=%v eval=%v", i, len(llm.embd[:i]), len(llm.embd[i:]))
	return cTokens
}
Michael Yang's avatar
Michael Yang committed
432

433
// Encode tokenizes prompt with the model's vocabulary and returns the token
// ids, or nil when tokenization fails or produces no tokens.
func (llm *llama) Encode(prompt string) []int {
	cPrompt := C.CString(prompt)
	defer C.free(unsafe.Pointer(cPrompt))

	// capacity bound: n bytes tokenize to at most n tokens, plus one extra
	// slot (the `true` argument presumably adds a BOS token — confirm
	// against llama.h)
	cTokens := make([]C.llama_token, len(prompt)+1)
	if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(cTokens), C.int(len(cTokens)), true); n > 0 {
		tokens := make([]int, n)
		for i := range cTokens[:n] {
			tokens[i] = int(cTokens[i])
		}

		return tokens
	}

	return nil
}

450
func (llm *llama) Decode(tokens ...int) string {
Michael Yang's avatar
Michael Yang committed
451
452
	var sb strings.Builder
	for _, token := range tokens {
453
		sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, C.llama_token(token))))
Jeffrey Morgan's avatar
Jeffrey Morgan committed
454
455
	}

Michael Yang's avatar
Michael Yang committed
456
	return sb.String()
Jeffrey Morgan's avatar
Jeffrey Morgan committed
457
458
}

459
// next evaluates any pending tokens and samples the next token from the
// resulting logits. It returns io.EOF at end-of-sequence or when Close has
// been requested. The mutex serializes access to the shared C context.
func (llm *llama) next() (C.llama_token, error) {
	llm.mu.Lock()
	defer llm.mu.Unlock()

	// context window is full: keep the first NumKeep tokens plus the most
	// recent numLeft tokens and re-evaluate from NumKeep onward
	if len(llm.embd) >= llm.NumCtx {
		numLeft := (llm.NumCtx - llm.NumKeep) / 2
		truncated := llm.embd[:llm.NumKeep]
		truncated = append(truncated, llm.embd[len(llm.embd)-numLeft:]...)

		llm.embd = truncated
		llm.cursor = llm.NumKeep
		log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d cursor=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated), llm.cursor)
	}

	// evaluate pending tokens in batches of at most NumBatch
	for {
		if llm.gc {
			// Close was called mid-generation
			return 0, io.EOF
		}

		if llm.cursor >= len(llm.embd) {
			break
		}

		numEval := len(llm.embd) - llm.cursor
		if numEval > llm.NumBatch {
			numEval = llm.NumBatch
		}

		if retval := C.llama_eval(llm.ctx, unsafe.SliceData(llm.embd[llm.cursor:]), C.int(numEval), C.int(llm.cursor), C.int(llm.NumThread)); retval != 0 {
			return 0, fmt.Errorf("llama_eval: %d", retval)
		}

		llm.cursor += numEval
	}

	// marshal the Go-side sampling options into the C struct consumed by
	// the llama_sample helper defined in the cgo preamble
	var sampleOpts C.struct_llama_sample_options
	sampleOpts.repeat_penalty = C.float(llm.RepeatPenalty)
	sampleOpts.frequency_penalty = C.float(llm.FrequencyPenalty)
	sampleOpts.presence_penalty = C.float(llm.PresencePenalty)
	sampleOpts.temperature = C.float(llm.Temperature)
	sampleOpts.top_k = C.int(llm.TopK)
	sampleOpts.top_p = C.float(llm.TopP)
	sampleOpts.tfs_z = C.float(llm.TFSZ)
	sampleOpts.typical_p = C.float(llm.TypicalP)
	sampleOpts.mirostat = C.int(llm.Mirostat)
	sampleOpts.mirostat_tau = C.float(llm.MirostatTau)
	sampleOpts.mirostat_eta = C.float(llm.MirostatEta)
	sampleOpts.penalize_newline = C.bool(llm.PenalizeNewline)

	numVocab := C.llama_n_vocab(llm.ctx)
	logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)

	// TODO: logit bias

	// build one candidate per vocabulary entry from the current logits
	candidates := make([]C.llama_token_data, numVocab)
	for i := range logits {
		candidates[i] = C.llama_token_data{
			id:    C.int(i),
			logit: logits[i],
			p:     0,
		}
	}

	// clamp the repetition-penalty window to the history we actually have
	// and to the context size
	repeatLastN := llm.RepeatLastN
	if len(llm.last) < repeatLastN {
		repeatLastN = len(llm.last)
	}

	if llm.NumCtx < repeatLastN {
		repeatLastN = llm.NumCtx
	}

	lastN := llm.last[len(llm.last)-repeatLastN:]

	token := C.llama_sample(
		llm.ctx,
		unsafe.SliceData(candidates), C.size_t(len(candidates)),
		unsafe.SliceData(lastN), C.size_t(len(lastN)),
		&sampleOpts,
	)

	llm.last = append(llm.last, token)
	llm.embd = append(llm.embd, token)

	if token == C.llama_token_eos() {
		return 0, io.EOF
	}

	return token, nil
}
549

550
// Embedding evaluates input and returns the model's embedding vector for it.
// The context must have been created with EmbeddingOnly enabled.
func (llm *llama) Embedding(input string) ([]float64, error) {
	if !llm.EmbeddingOnly {
		return nil, errors.New("llama: embedding not enabled")
	}

	tokens := llm.Encode(input)
	if tokens == nil {
		// Encode returns nil both on failure and for empty tokenization
		return nil, errors.New("llama: tokenize embedding")
	}

	cTokens := make([]C.llama_token, len(tokens))
	for i := range tokens {
		cTokens[i] = C.llama_token(tokens[i])
	}

	retval := C.llama_eval(llm.ctx, unsafe.SliceData(cTokens), C.int(len(tokens)), 0, C.int(llm.NumThread))
	if retval != 0 {
		return nil, errors.New("llama: eval")
	}

	C.llama_print_timings(llm.ctx)

	n := C.llama_n_embd(llm.ctx)
	if n <= 0 {
		return nil, errors.New("llama: no embeddings generated")
	}
	cEmbeddings := unsafe.Slice(C.llama_get_embeddings(llm.ctx), n)

	// copy the C floats into a Go-owned float64 slice before returning
	embeddings := make([]float64, len(cEmbeddings))
	for i, v := range cEmbeddings {
		embeddings[i] = float64(v)
	}
	return embeddings, nil
}