llama.go 13.6 KB
Newer Older
1
package llm

/*
#cgo CFLAGS: -Ofast -std=c11 -fPIC
#cgo CPPFLAGS: -Ofast -Wall -Wextra -Wno-unused-function -Wno-unused-variable -DNDEBUG -DGGML_USE_K_QUANTS
#cgo CXXFLAGS: -std=c++11 -fPIC
#cgo darwin CPPFLAGS:  -DGGML_USE_ACCELERATE
#cgo darwin,arm64 CPPFLAGS: -DGGML_USE_METAL -DGGML_METAL_NDEBUG
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
#include <stdlib.h>
#include "llama.h"

// llama_sample_options bundles every sampling hyperparameter so the Go side
// can hand them to llama_sample through a single pointer argument.
struct llama_sample_options
{
	float repeat_penalty;
	float frequency_penalty;
	float presence_penalty;
	float temperature;
	int32_t top_k;
	float top_p;
	float tfs_z;
	float typical_p;
	int mirostat;
	float mirostat_tau;
	float mirostat_eta;
	bool penalize_newline;
};

// llama_sample selects the next token from the candidate logits. It applies
// repetition, frequency, and presence penalties, then decodes greedily when
// temperature <= 0, via mirostat v1/v2 when requested, or otherwise through
// the standard top-k / tail-free / typical / top-p / temperature pipeline.
llama_token llama_sample(
		struct llama_context *ctx,
		struct llama_token_data *candidates,
		size_t n_candidates,
		const llama_token *last_tokens,
		size_t n_last_tokens,
		struct llama_sample_options *opts)
{
	llama_token_data_array candidates_p = {
		candidates,
		n_candidates,
		false,
	};

	// save the newline candidate so its pre-penalty logit can be restored
	// below when newline penalization is disabled
	struct llama_token_data newline = candidates_p.data[llama_token_nl()];

	llama_sample_repetition_penalty(
		ctx, &candidates_p,
		last_tokens, n_last_tokens,
		opts->repeat_penalty);

	llama_sample_frequency_and_presence_penalties(
		ctx, &candidates_p,
		last_tokens, n_last_tokens,
		opts->frequency_penalty, opts->presence_penalty);

	if (!opts->penalize_newline) {
		candidates_p.data[llama_token_nl()] = newline;
	}

	// non-positive temperature means deterministic (greedy) decoding
	if (opts->temperature <= 0) {
		return llama_sample_token_greedy(ctx, &candidates_p);
	}

	if (opts->mirostat == 1) {
		int mirostat_m = 100;
		float mirostat_mu = 2.0f * opts->mirostat_tau;
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token_mirostat(
			ctx, &candidates_p,
			opts->mirostat_tau, opts->mirostat_eta,
			mirostat_m, &mirostat_mu);
	} else if (opts->mirostat == 2) {
		float mirostat_mu = 2.0f * opts->mirostat_tau;
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token_mirostat_v2(
			ctx, &candidates_p,
			opts->mirostat_tau, opts->mirostat_eta,
			&mirostat_mu);
	} else {
		llama_sample_top_k(ctx, &candidates_p, opts->top_k, 1);
		llama_sample_tail_free(ctx, &candidates_p, opts->tfs_z, 1);
		llama_sample_typical(ctx, &candidates_p, opts->typical_p, 1);
		llama_sample_top_p(ctx, &candidates_p, opts->top_p, 1);
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token(ctx, &candidates_p);
	}
}
*/
import "C"
89

Jeffrey Morgan's avatar
Jeffrey Morgan committed
90
import (
Michael Yang's avatar
Michael Yang committed
91
	"bytes"
Michael Yang's avatar
Michael Yang committed
92
	"embed"
Michael Yang's avatar
Michael Yang committed
93
	"errors"
94
	"fmt"
Michael Yang's avatar
Michael Yang committed
95
	"io"
Michael Yang's avatar
Michael Yang committed
96
	"log"
Michael Yang's avatar
Michael Yang committed
97
	"os"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
98
	"strings"
Michael Yang's avatar
Michael Yang committed
99
	"sync"
Michael Yang's avatar
Michael Yang committed
100
	"unicode/utf8"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
101
	"unsafe"
Michael Yang's avatar
Michael Yang committed
102
103

	"github.com/jmorganca/ollama/api"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
104
105
)

Michael Yang's avatar
Michael Yang committed
106
107
108
// fs embeds the Metal shader source so the Metal backend can be supplied
// with it at runtime on Apple GPUs.
//
//go:embed ggml-metal.metal
var fs embed.FS

// ModelFamilyLlama identifies models using the llama architecture.
const ModelFamilyLlama ModelFamily = "llama"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
110

Michael Yang's avatar
Michael Yang committed
111
112
113
// llamaModel describes a llama-family model file; it holds the
// hyperparameters parsed from the file header.
type llamaModel struct {
	hyperparameters llamaHyperparameters
}

// ModelFamily reports the architecture family of this model.
func (llm *llamaModel) ModelFamily() ModelFamily {
	return ModelFamilyLlama
}
Michael Yang's avatar
Michael Yang committed
118

Michael Yang's avatar
Michael Yang committed
119
120
121
122
123
124
// ModelType reports the parameter-count class of the model.
// NOTE(review): this is hard-coded to 30B rather than derived from the
// hyperparameters — confirm callers do not depend on an accurate size.
func (llm *llamaModel) ModelType() ModelType {
	return ModelType30B
}

// FileType reports the quantization level recorded in the file header.
func (llm *llamaModel) FileType() FileType {
	return llm.hyperparameters.FileType
}
Jeffrey Morgan's avatar
Jeffrey Morgan committed
126

127
128
129
130
131
132
133
134
135
136
137
138
// llamaHyperparameters mirrors the hyperparameter section of a llama model
// file header, with fields in on-disk order.
type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType llamaFileType
}

// llamaFileType is the quantization format identifier stored in a llama
// model file header. The numeric values mirror llama.cpp's llama_ftype.
type llamaFileType uint32

const (
	llamaFileTypeF32 llamaFileType = iota
	llamaFileTypeF16
	llamaFileTypeQ4_0
	llamaFileTypeQ4_1
	llamaFileTypeQ4_1_F16
	// values 5 and 6 are unassigned upstream, so Q8_0 resumes at 7
	llamaFileTypeQ8_0 llamaFileType = iota + 2
	llamaFileTypeQ5_0
	llamaFileTypeQ5_1
	llamaFileTypeQ2_K
	llamaFileTypeQ3_K_S
	llamaFileTypeQ3_K_M
	llamaFileTypeQ3_K_L
	llamaFileTypeQ4_K_S
	llamaFileTypeQ4_K_M
	llamaFileTypeQ5_K_S
	llamaFileTypeQ5_K_M
	llamaFileTypeQ6_K
)

// String returns the conventional name of the quantization format, or
// "Unknown" for values this package does not recognize.
func (ft llamaFileType) String() string {
	names := map[llamaFileType]string{
		llamaFileTypeF32:      "F32",
		llamaFileTypeF16:      "F16",
		llamaFileTypeQ4_0:     "Q4_0",
		llamaFileTypeQ4_1:     "Q4_1",
		llamaFileTypeQ4_1_F16: "Q4_1_F16",
		llamaFileTypeQ8_0:     "Q8_0",
		llamaFileTypeQ5_0:     "Q5_0",
		llamaFileTypeQ5_1:     "Q5_1",
		llamaFileTypeQ2_K:     "Q2_K",
		llamaFileTypeQ3_K_S:   "Q3_K_S",
		llamaFileTypeQ3_K_M:   "Q3_K_M",
		llamaFileTypeQ3_K_L:   "Q3_K_L",
		llamaFileTypeQ4_K_S:   "Q4_K_S",
		llamaFileTypeQ4_K_M:   "Q4_K_M",
		llamaFileTypeQ5_K_S:   "Q5_K_S",
		llamaFileTypeQ5_K_M:   "Q5_K_M",
		llamaFileTypeQ6_K:     "Q6_K",
	}

	if name, ok := names[ft]; ok {
		return name
	}

	return "Unknown"
}

// llama wraps a llama.cpp model/context pair together with the mutable
// decoding state for a single loaded model.
type llama struct {
	params *C.struct_llama_context_params
	model  *C.struct_llama_model
	ctx    *C.struct_llama_context

	// last holds recent tokens consulted for repetition penalties; embd is
	// the full token sequence and cursor indexes the next token to evaluate.
	last   []C.llama_token
	embd   []C.llama_token
	cursor int

	// mu serializes access to the C context; gc is set by Close to make
	// in-flight prediction loops bail out.
	mu sync.Mutex
	gc bool

	api.Options
}

222
// newLlama loads a model file (plus any LoRA adapters) from disk, creates a
// llama.cpp context configured from opts, and returns a ready-to-use llama.
// The model is warmed up with a single BOS eval so later timing statistics
// exclude first-eval setup cost.
func newLlama(model string, adapters []string, opts api.Options) (*llama, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	llm := llama{Options: opts}

	C.llama_backend_init(C.bool(llm.UseNUMA))

	// copy generation options into the C context parameters
	params := C.llama_context_default_params()
	params.seed = C.uint(llm.Seed)
	params.n_ctx = C.int(llm.NumCtx)
	params.n_batch = C.int(llm.NumBatch)
	params.n_gqa = C.int(llm.NumGQA)
	params.n_gpu_layers = C.int(llm.NumGPU)
	params.main_gpu = C.int(llm.MainGPU)
	params.low_vram = C.bool(llm.LowVRAM)
	params.f16_kv = C.bool(llm.F16KV)
	params.logits_all = C.bool(llm.LogitsAll)
	params.vocab_only = C.bool(llm.VocabOnly)
	params.use_mmap = C.bool(llm.UseMMap)
	params.use_mlock = C.bool(llm.UseMLock)
	params.embedding = C.bool(llm.EmbeddingOnly)
	params.rope_freq_base = C.float(llm.RopeFrequencyBase)
	params.rope_freq_scale = C.float(llm.RopeFrequencyScale)

	// applying a LoRA adapter modifies model weights, which is incompatible
	// with a memory-mapped model file
	if len(adapters) > 0 && llm.UseMMap {
		log.Printf("must disable mmap to use lora adapters")
		params.use_mmap = C.bool(false)
	}

	llm.params = &params

	cModel := C.CString(model)
	defer C.free(unsafe.Pointer(cModel))

	llm.model = C.llama_load_model_from_file(cModel, params)
	if llm.model == nil {
		return nil, errors.New("failed to load model")
	}

	llm.ctx = C.llama_new_context_with_model(llm.model, params)
	if llm.ctx == nil {
		return nil, errors.New("failed to create context")
	}

	for _, adapter := range adapters {
		cAdapter := C.CString(adapter)
		defer C.free(unsafe.Pointer(cAdapter))

		if retval := C.llama_model_apply_lora_from_file(llm.model, cAdapter, nil, C.int(llm.NumThread)); retval != 0 {
			return nil, fmt.Errorf("failed to load adapter %s", adapter)
		}
	}

	// warm up the model
	bos := []C.llama_token{C.llama_token_bos()}
	C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
	C.llama_reset_timings(llm.ctx)

	return &llm, nil
}

285
// Close releases the C context and model. It first flags gc so in-flight
// prediction loops stop, then waits on the mutex before freeing.
func (llm *llama) Close() {
	// signal any running prediction loop to bail out at its next check
	llm.gc = true

	llm.mu.Lock()
	defer llm.mu.Unlock()

	// defers run LIFO: the context is freed before the model
	defer C.llama_free_model(llm.model)
	defer C.llama_free(llm.ctx)

	C.llama_print_timings(llm.ctx)
}

297
298
299
300
func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

Michael Yang's avatar
Michael Yang committed
301
302
var errNeedMoreData = errors.New("need more data")

303
// Predict generates tokens for prompt (appended to the prior context tokens
// in ctx) and streams decoded chunks to fn. A final callback with Done=true
// carries the full token context and timing statistics.
func (llm *llama) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
	C.llama_reset_timings(llm.ctx)

	llm.marshalPrompt(ctx, prompt)

	C.llama_set_rng_seed(llm.ctx, C.uint(llm.Seed))

	var b bytes.Buffer
	for {
		token, err := llm.next()
		if llm.gc {
			// Close was called: stop quietly, no error reported
			return nil
		} else if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return err
		}

		b.WriteString(llm.Decode(int(token)))

		// io.EOF means a stop sequence matched; errNeedMoreData means the
		// buffer is a prefix of one, so hold it back and keep generating
		if err := llm.checkStopConditions(b); err != nil {
			if errors.Is(err, io.EOF) {
				break
			} else if errors.Is(err, errNeedMoreData) {
				continue
			}

			return err
		}

		// only flush complete UTF-8 sequences; a partial rune is held back
		// unless the buffer already exceeds the longest possible rune
		if utf8.Valid(b.Bytes()) || b.Len() >= utf8.UTFMax {
			fn(api.GenerateResponse{Response: b.String()})
			b.Reset()
		}
	}

	// convert the C token slice back to plain ints for the response context
	embd := make([]int, len(llm.embd))
	for i := range llm.embd {
		embd[i] = int(llm.embd[i])
	}

	timings := C.llama_get_timings(llm.ctx)
	fn(api.GenerateResponse{
		Done:               true,
		Context:            embd,
		SampleCount:        int(timings.n_sample),
		SampleDuration:     parseDurationMs(float64(timings.t_sample_ms)),
		PromptEvalCount:    int(timings.n_p_eval),
		PromptEvalDuration: parseDurationMs(float64(timings.t_p_eval_ms)),
		EvalCount:          int(timings.n_eval),
		EvalDuration:       parseDurationMs(float64(timings.t_eval_ms)),
	})

	return nil
}

359
func (llm *llama) checkStopConditions(b bytes.Buffer) error {
360
	for _, stopCondition := range llm.Stop {
361
		if stopCondition == strings.TrimSpace(b.String()) {
Michael Yang's avatar
Michael Yang committed
362
			return io.EOF
363
		} else if strings.HasPrefix(stopCondition, strings.TrimSpace(b.String())) {
Michael Yang's avatar
Michael Yang committed
364
365
366
367
368
369
370
			return errNeedMoreData
		}
	}

	return nil
}

371
// marshalPrompt tokenizes prompt, appends it to the prior context tokens,
// truncates the combined sequence so it fits the context window, and primes
// llm.last, llm.embd and llm.cursor for generation. It returns the token
// sequence that will be evaluated.
func (llm *llama) marshalPrompt(ctx []int, prompt string) []C.llama_token {
	tokens := append(ctx, llm.Encode(prompt)...)
	if llm.NumKeep < 0 {
		llm.NumKeep = len(tokens)
	}

	cTokens := make([]C.llama_token, len(tokens))
	for i := range tokens {
		cTokens[i] = C.llama_token(tokens[i])
	}

	// min(llm.NumCtx - 4, llm.NumKeep)
	if llm.NumCtx-4 < llm.NumKeep {
		llm.NumKeep = llm.NumCtx - 4
	}

	if len(tokens) >= llm.NumCtx {
		// truncate input: keep the first NumKeep tokens, then drop whole
		// numLeft-sized blocks from the middle so the tail is retained
		numLeft := (llm.NumCtx - llm.NumKeep) / 2
		truncated := cTokens[:llm.NumKeep]
		erasedBlocks := (len(cTokens) - llm.NumKeep - numLeft - 1) / numLeft
		truncated = append(truncated, cTokens[llm.NumKeep+erasedBlocks*numLeft:]...)
		copy(llm.last, cTokens[len(cTokens)-llm.NumCtx:])

		cTokens = truncated
		log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated))
	} else {
		// left-pad last to NumCtx so repetition penalties see a fixed window
		llm.last = make([]C.llama_token, llm.NumCtx-len(cTokens))
		llm.last = append(llm.last, cTokens...)
	}

	// find the longest common prefix with the previously evaluated tokens so
	// the already-computed portion is not re-evaluated
	var i int
	for i = 0; i < len(llm.embd) && i < len(cTokens) && llm.embd[i] == cTokens[i]; i++ {
		// noop
	}

	llm.embd = cTokens
	if i == len(cTokens) {
		// evaluate at least one token to generate logits
		i--
	}

	llm.cursor = i

	log.Printf("prompt: num_past=%d cached=%v eval=%v", i, len(llm.embd[:i]), len(llm.embd[i:]))
	return cTokens
}
Michael Yang's avatar
Michael Yang committed
418

419
// Encode tokenizes prompt with the model's tokenizer (requesting a leading
// BOS token) and returns the token ids, or nil if tokenization fails.
func (llm *llama) Encode(prompt string) []int {
	cPrompt := C.CString(prompt)
	defer C.free(unsafe.Pointer(cPrompt))

	// a prompt of N bytes yields at most N tokens, plus one for BOS
	cTokens := make([]C.llama_token, len(prompt)+1)
	if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(cTokens), C.int(len(cTokens)), true); n > 0 {
		tokens := make([]int, n)
		for i := range cTokens[:n] {
			tokens[i] = int(cTokens[i])
		}

		return tokens
	}

	return nil
}

436
// Decode converts token ids back to text by concatenating each token's
// string piece from the model's vocabulary.
func (llm *llama) Decode(tokens ...int) string {
	var sb strings.Builder
	for _, token := range tokens {
		sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, C.llama_token(token))))
	}

	return sb.String()
}

445
// next evaluates any not-yet-evaluated tokens in llm.embd and samples the
// next token from the resulting logits. It returns io.EOF when the model
// emits EOS or when Close has been requested.
func (llm *llama) next() (C.llama_token, error) {
	llm.mu.Lock()
	defer llm.mu.Unlock()

	// context window is full: keep the first NumKeep tokens plus the most
	// recent numLeft tokens, then re-evaluate from NumKeep onward
	if len(llm.embd) >= llm.NumCtx {
		numLeft := (llm.NumCtx - llm.NumKeep) / 2
		truncated := llm.embd[:llm.NumKeep]
		truncated = append(truncated, llm.embd[len(llm.embd)-numLeft:]...)

		llm.embd = truncated
		llm.cursor = llm.NumKeep
		log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d cursor=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated), llm.cursor)
	}

	// evaluate pending tokens in batches of at most NumBatch
	for {
		if llm.gc {
			return 0, io.EOF
		}

		if llm.cursor >= len(llm.embd) {
			break
		}

		numEval := len(llm.embd) - llm.cursor
		if numEval > llm.NumBatch {
			numEval = llm.NumBatch
		}

		if retval := C.llama_eval(llm.ctx, unsafe.SliceData(llm.embd[llm.cursor:]), C.int(numEval), C.int(llm.cursor), C.int(llm.NumThread)); retval != 0 {
			return 0, fmt.Errorf("llama_eval: %d", retval)
		}

		llm.cursor += numEval
	}

	// marshal sampling options into the C struct consumed by llama_sample
	var sampleOpts C.struct_llama_sample_options
	sampleOpts.repeat_penalty = C.float(llm.RepeatPenalty)
	sampleOpts.frequency_penalty = C.float(llm.FrequencyPenalty)
	sampleOpts.presence_penalty = C.float(llm.PresencePenalty)
	sampleOpts.temperature = C.float(llm.Temperature)
	sampleOpts.top_k = C.int(llm.TopK)
	sampleOpts.top_p = C.float(llm.TopP)
	sampleOpts.tfs_z = C.float(llm.TFSZ)
	sampleOpts.typical_p = C.float(llm.TypicalP)
	sampleOpts.mirostat = C.int(llm.Mirostat)
	sampleOpts.mirostat_tau = C.float(llm.MirostatTau)
	sampleOpts.mirostat_eta = C.float(llm.MirostatEta)
	sampleOpts.penalize_newline = C.bool(llm.PenalizeNewline)

	numVocab := C.llama_n_vocab(llm.ctx)
	logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)

	// TODO: logit bias

	// build one candidate per vocabulary entry from the current logits
	candidates := make([]C.llama_token_data, numVocab)
	for i := range logits {
		candidates[i] = C.llama_token_data{
			id:    C.int(i),
			logit: logits[i],
			p:     0,
		}
	}

	// repetition penalties consider the most recent RepeatLastN tokens,
	// clamped to what is available and to the context size
	repeatLastN := llm.RepeatLastN
	if len(llm.last) < repeatLastN {
		repeatLastN = len(llm.last)
	}

	if llm.NumCtx < repeatLastN {
		repeatLastN = llm.NumCtx
	}

	lastN := llm.last[len(llm.last)-repeatLastN:]

	token := C.llama_sample(
		llm.ctx,
		unsafe.SliceData(candidates), C.size_t(len(candidates)),
		unsafe.SliceData(lastN), C.size_t(len(lastN)),
		&sampleOpts,
	)

	llm.last = append(llm.last, token)
	llm.embd = append(llm.embd, token)

	if token == C.llama_token_eos() {
		return 0, io.EOF
	}

	return token, nil
}
535

536
// Embedding evaluates input and returns the model's embedding vector for it.
// The context must have been created with EmbeddingOnly enabled.
func (llm *llama) Embedding(input string) ([]float64, error) {
	if !llm.EmbeddingOnly {
		return nil, errors.New("llama: embedding not enabled")
	}

	tokens := llm.Encode(input)
	if tokens == nil {
		return nil, errors.New("llama: tokenize embedding")
	}

	cTokens := make([]C.llama_token, len(tokens))
	for i := range tokens {
		cTokens[i] = C.llama_token(tokens[i])
	}

	// evaluate the whole input from position 0 to produce embeddings
	retval := C.llama_eval(llm.ctx, unsafe.SliceData(cTokens), C.int(len(tokens)), 0, C.int(llm.NumThread))
	if retval != 0 {
		return nil, errors.New("llama: eval")
	}

	C.llama_print_timings(llm.ctx)

	n := C.llama_n_embd(llm.ctx)
	if n <= 0 {
		return nil, errors.New("llama: no embeddings generated")
	}
	cEmbeddings := unsafe.Slice(C.llama_get_embeddings(llm.ctx), n)

	// widen C floats to float64 for the API response
	embeddings := make([]float64, len(cEmbeddings))
	for i, v := range cEmbeddings {
		embeddings[i] = float64(v)
	}
	return embeddings, nil
}