llama.go 14.3 KB
Newer Older
1
package llm

/*
#cgo CFLAGS: -Ofast -std=c11 -fPIC
#cgo CPPFLAGS: -Ofast -Wall -Wextra -Wno-unused-function -Wno-unused-variable -DNDEBUG -DGGML_USE_K_QUANTS
#cgo CXXFLAGS: -std=c++11 -fPIC
#cgo darwin CPPFLAGS:  -DGGML_USE_ACCELERATE
#cgo darwin,arm64 CPPFLAGS: -DGGML_USE_METAL -DGGML_METAL_NDEBUG
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
#include <stdlib.h>
#include "llama.h"

// llama_sample_options bundles every tunable sampling parameter so the Go
// side can pass them across the cgo boundary in a single struct.
struct llama_sample_options
{
	float repeat_penalty;
	float frequency_penalty;
	float presence_penalty;
	float temperature;
	int32_t top_k;
	float top_p;
	float tfs_z;
	float typical_p;
	int mirostat;
	float mirostat_tau;
	float mirostat_eta;
	bool penalize_newline;
};

// llama_sample selects the next token from the candidate logits. It applies
// repetition, frequency, and presence penalties, then dispatches to one of:
// greedy decoding (temperature <= 0), mirostat v1, mirostat v2, or the
// standard top-k / tail-free / typical / top-p sampling chain.
llama_token llama_sample(
		struct llama_context *ctx,
		struct llama_token_data *candidates,
		size_t n_candidates,
		const llama_token *last_tokens,
		size_t n_last_tokens,
		struct llama_sample_options *opts)
{
	llama_token_data_array candidates_p = {
		candidates,
		n_candidates,
		false,
	};

	// remember the newline token's pre-penalty data so it can be restored
	// below when penalize_newline is disabled
	struct llama_token_data newline = candidates_p.data[llama_token_nl()];

	llama_sample_repetition_penalty(
		ctx, &candidates_p,
		last_tokens, n_last_tokens,
		opts->repeat_penalty);

	llama_sample_frequency_and_presence_penalties(
		ctx, &candidates_p,
		last_tokens, n_last_tokens,
		opts->frequency_penalty, opts->presence_penalty);

	if (!opts->penalize_newline) {
		// undo any penalty the calls above applied to the newline token
		candidates_p.data[llama_token_nl()] = newline;
	}

	if (opts->temperature <= 0) {
		// non-positive temperature selects deterministic, greedy decoding
		return llama_sample_token_greedy(ctx, &candidates_p);
	}

	if (opts->mirostat == 1) {
		int mirostat_m = 100;
		float mirostat_mu = 2.0f * opts->mirostat_tau;
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token_mirostat(
			ctx, &candidates_p,
			opts->mirostat_tau, opts->mirostat_eta,
			mirostat_m, &mirostat_mu);
	} else if (opts->mirostat == 2) {
		float mirostat_mu = 2.0f * opts->mirostat_tau;
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token_mirostat_v2(
			ctx, &candidates_p,
			opts->mirostat_tau, opts->mirostat_eta,
			&mirostat_mu);
	} else {
		llama_sample_top_k(ctx, &candidates_p, opts->top_k, 1);
		llama_sample_tail_free(ctx, &candidates_p, opts->tfs_z, 1);
		llama_sample_typical(ctx, &candidates_p, opts->typical_p, 1);
		llama_sample_top_p(ctx, &candidates_p, opts->top_p, 1);
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token(ctx, &candidates_p);
	}
}
*/
import "C"
89

Jeffrey Morgan's avatar
Jeffrey Morgan committed
90
import (
Michael Yang's avatar
Michael Yang committed
91
	"bytes"
Michael Yang's avatar
Michael Yang committed
92
	"embed"
Michael Yang's avatar
Michael Yang committed
93
	"errors"
94
	"fmt"
Michael Yang's avatar
Michael Yang committed
95
	"io"
Michael Yang's avatar
Michael Yang committed
96
	"log"
Michael Yang's avatar
Michael Yang committed
97
	"os"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
98
	"strings"
Michael Yang's avatar
Michael Yang committed
99
	"sync"
Michael Yang's avatar
Michael Yang committed
100
	"unicode/utf8"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
101
	"unsafe"
Michael Yang's avatar
Michael Yang committed
102
103

	"github.com/jmorganca/ollama/api"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
104
105
)

Michael Yang's avatar
Michael Yang committed
106
107
108
// The Metal shader source is embedded into the binary so the Metal backend
// can load it at runtime without an external file dependency.
//go:embed ggml-metal.metal
var fs embed.FS

// ModelFamilyLlama identifies models in the llama architecture family.
const ModelFamilyLlama ModelFamily = "llama"
Jeffrey Morgan's avatar
Jeffrey Morgan committed
110

Michael Yang's avatar
Michael Yang committed
111
112
113
// llamaModel describes a llama-family model via its parsed hyperparameters.
type llamaModel struct {
	hyperparameters llamaHyperparameters
}
Michael Yang's avatar
Michael Yang committed
114

Michael Yang's avatar
Michael Yang committed
115
116
117
// ModelFamily reports the architecture family; always llama for this type.
func (llm *llamaModel) ModelFamily() ModelFamily {
	return ModelFamilyLlama
}
Michael Yang's avatar
Michael Yang committed
118

Michael Yang's avatar
Michael Yang committed
119
func (llm *llamaModel) ModelType() ModelType {
Michael Yang's avatar
Michael Yang committed
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
	switch llm.hyperparameters.NumLayer {
	case 26:
		return ModelType3B
	case 32:
		return ModelType7B
	case 40:
		return ModelType13B
	case 60:
		return ModelType30B
	case 80:
		return ModelType65B
	}

	// TODO: find a better default
	return ModelType7B
Michael Yang's avatar
Michael Yang committed
135
136
137
138
}

// FileType reports the quantization level recorded in the model file.
func (llm *llamaModel) FileType() FileType {
	return llm.hyperparameters.FileType
}
Jeffrey Morgan's avatar
Jeffrey Morgan committed
140

141
142
143
144
145
146
147
148
149
150
151
152
// llamaHyperparameters holds the hyperparameters of a llama-family model.
// NOTE(review): the field order appears to mirror the GGML file header
// layout — confirm against the decoder before reordering fields.
type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType llamaFileType
}

// llamaFileType mirrors llama.cpp's file-type (quantization) enum.
type llamaFileType uint32

const (
	llamaFileTypeF32 llamaFileType = iota
	llamaFileTypeF16
	llamaFileTypeQ4_0
	llamaFileTypeQ4_1
	llamaFileTypeQ4_1_F16
	// values 5 and 6 are intentionally skipped (removed formats); the
	// explicit iota offset keeps the remaining values aligned
	llamaFileTypeQ8_0 llamaFileType = iota + 2
	llamaFileTypeQ5_0
	llamaFileTypeQ5_1
	llamaFileTypeQ2_K
	llamaFileTypeQ3_K_S
	llamaFileTypeQ3_K_M
	llamaFileTypeQ3_K_L
	llamaFileTypeQ4_K_S
	llamaFileTypeQ4_K_M
	llamaFileTypeQ5_K_S
	llamaFileTypeQ5_K_M
	llamaFileTypeQ6_K
)

// llamaFileTypeNames maps each known file type to its display name.
var llamaFileTypeNames = map[llamaFileType]string{
	llamaFileTypeF32:      "F32",
	llamaFileTypeF16:      "F16",
	llamaFileTypeQ4_0:     "Q4_0",
	llamaFileTypeQ4_1:     "Q4_1",
	llamaFileTypeQ4_1_F16: "Q4_1_F16",
	llamaFileTypeQ8_0:     "Q8_0",
	llamaFileTypeQ5_0:     "Q5_0",
	llamaFileTypeQ5_1:     "Q5_1",
	llamaFileTypeQ2_K:     "Q2_K",
	llamaFileTypeQ3_K_S:   "Q3_K_S",
	llamaFileTypeQ3_K_M:   "Q3_K_M",
	llamaFileTypeQ3_K_L:   "Q3_K_L",
	llamaFileTypeQ4_K_S:   "Q4_K_S",
	llamaFileTypeQ4_K_M:   "Q4_K_M",
	llamaFileTypeQ5_K_S:   "Q5_K_S",
	llamaFileTypeQ5_K_M:   "Q5_K_M",
	llamaFileTypeQ6_K:     "Q6_K",
}

// String returns the display name for the file type, or "Unknown" for
// unrecognized values.
func (ft llamaFileType) String() string {
	if name, ok := llamaFileTypeNames[ft]; ok {
		return name
	}
	return "Unknown"
}

// llama wraps a llama.cpp model and inference context together with the
// mutable decoding state for a session.
type llama struct {
	params *C.struct_llama_context_params
	model  *C.struct_llama_model
	ctx    *C.struct_llama_context

	// last holds recent tokens for repetition penalties; embd is the full
	// token stream (prompt + generated); cursor counts how many tokens of
	// embd have already been evaluated.
	last   []C.llama_token
	embd   []C.llama_token
	cursor int

	// mu serializes access to the native context; gc asks the prediction
	// loop to stop so the context can be freed.
	mu sync.Mutex
	gc bool

	api.Options
}

236
func newLlama(model string, adapters []string, opts api.Options) (*llama, error) {
Michael Yang's avatar
Michael Yang committed
237
238
	if _, err := os.Stat(model); err != nil {
		return nil, err
Jeffrey Morgan's avatar
Jeffrey Morgan committed
239
240
	}

241
	llm := llama{Options: opts}
Michael Yang's avatar
Michael Yang committed
242

Michael Yang's avatar
Michael Yang committed
243
	C.llama_backend_init(C.bool(llm.UseNUMA))
Michael Yang's avatar
Michael Yang committed
244
245
246
247
248

	params := C.llama_context_default_params()
	params.seed = C.uint(llm.Seed)
	params.n_ctx = C.int(llm.NumCtx)
	params.n_batch = C.int(llm.NumBatch)
Michael Yang's avatar
Michael Yang committed
249
	params.n_gqa = C.int(llm.NumGQA)
Michael Yang's avatar
Michael Yang committed
250
251
252
253
254
255
256
257
258
	params.n_gpu_layers = C.int(llm.NumGPU)
	params.main_gpu = C.int(llm.MainGPU)
	params.low_vram = C.bool(llm.LowVRAM)
	params.f16_kv = C.bool(llm.F16KV)
	params.logits_all = C.bool(llm.LogitsAll)
	params.vocab_only = C.bool(llm.VocabOnly)
	params.use_mmap = C.bool(llm.UseMMap)
	params.use_mlock = C.bool(llm.UseMLock)
	params.embedding = C.bool(llm.EmbeddingOnly)
259
260
	params.rope_freq_base = C.float(llm.RopeFrequencyBase)
	params.rope_freq_scale = C.float(llm.RopeFrequencyScale)
261
262
263
264
265
266

	if len(adapters) > 0 && llm.UseMMap {
		log.Printf("must disable mmap to use lora adapters")
		params.use_mmap = C.bool(false)
	}

Michael Yang's avatar
Michael Yang committed
267
268
269
270
271
272
	llm.params = &params

	cModel := C.CString(model)
	defer C.free(unsafe.Pointer(cModel))

	llm.model = C.llama_load_model_from_file(cModel, params)
273
274
275
276
	if llm.model == nil {
		return nil, errors.New("failed to load model")
	}

Michael Yang's avatar
Michael Yang committed
277
	llm.ctx = C.llama_new_context_with_model(llm.model, params)
278
279
280
	if llm.ctx == nil {
		return nil, errors.New("failed to create context")
	}
Michael Yang's avatar
Michael Yang committed
281

282
283
284
285
286
287
288
289
290
	for _, adapter := range adapters {
		cAdapter := C.CString(adapter)
		defer C.free(unsafe.Pointer(cAdapter))

		if retval := C.llama_model_apply_lora_from_file(llm.model, cAdapter, nil, C.int(llm.NumThread)); retval != 0 {
			return nil, fmt.Errorf("failed to load adapter %s", adapter)
		}
	}

Michael Yang's avatar
Michael Yang committed
291
292
293
294
295
296
	// warm up the model
	bos := []C.llama_token{C.llama_token_bos()}
	C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
	C.llama_reset_timings(llm.ctx)

	return &llm, nil
Jeffrey Morgan's avatar
Jeffrey Morgan committed
297
298
}

299
// Close stops any in-flight prediction, prints llama.cpp timing stats, and
// releases the native context and model.
func (llm *llama) Close() {
	// signal the prediction loop to bail out before taking the lock, so it
	// releases the mutex promptly
	llm.gc = true

	llm.mu.Lock()
	defer llm.mu.Unlock()

	// defers run in reverse order: the context is freed first, then the
	// model it was created from
	defer C.llama_free_model(llm.model)
	defer C.llama_free(llm.ctx)

	C.llama_print_timings(llm.ctx)
}

311
312
313
314
// SetOptions replaces the runtime options used by subsequent predictions.
func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

Michael Yang's avatar
Michael Yang committed
315
316
// errNeedMoreData signals that more input is required before a complete
// result can be produced.
// NOTE(review): not referenced anywhere in this file's visible code —
// confirm it is used elsewhere before removing.
var errNeedMoreData = errors.New("need more data")

317
// Predict feeds ctx (token ids from a previous exchange) plus the new
// prompt into the model and streams generated text to fn. A final
// GenerateResponse with Done=true carries the updated context and timing
// statistics.
func (llm *llama) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
	C.llama_reset_timings(llm.ctx)

	// tokenize and cache the prompt; this primes llm.embd and llm.cursor
	llm.marshalPrompt(ctx, prompt)

	C.llama_set_rng_seed(llm.ctx, C.uint(llm.Seed))

	var b bytes.Buffer
	for {
		token, err := llm.next()
		if llm.gc {
			// Close() was called; stop without error
			return nil
		} else if errors.Is(err, io.EOF) {
			// end-of-sequence token generated
			break
		} else if err != nil {
			return err
		}

		b.WriteString(llm.Decode(int(token)))

		stop, endsWithStopPrefix := handleStopSequences(&b, llm.Stop)
		if endsWithStopPrefix {
			// b may end in a partial stop sequence; buffer more tokens
			// before emitting anything to the caller
			continue
		}

		// only flush complete UTF-8 sequences — a token can end mid-rune.
		// The b.Len() >= utf8.UTFMax escape hatch prevents buffering
		// forever on genuinely invalid byte sequences.
		if utf8.Valid(b.Bytes()) || b.Len() >= utf8.UTFMax {
			fn(api.GenerateResponse{Response: b.String()})
			b.Reset()
		}

		if stop {
			break
		}
	}

	// convert the evaluated tokens back to ints for the response context
	embd := make([]int, len(llm.embd))
	for i := range llm.embd {
		embd[i] = int(llm.embd[i])
	}

	timings := C.llama_get_timings(llm.ctx)
	fn(api.GenerateResponse{
		Done:               true,
		Context:            embd,
		SampleCount:        int(timings.n_sample),
		SampleDuration:     parseDurationMs(float64(timings.t_sample_ms)),
		PromptEvalCount:    int(timings.n_p_eval),
		PromptEvalDuration: parseDurationMs(float64(timings.t_p_eval_ms)),
		EvalCount:          int(timings.n_eval),
		EvalDuration:       parseDurationMs(float64(timings.t_eval_ms)),
	})

	return nil
}

371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
// handleStopSequences checks whether b contains any of the stop sequences, or ends with a prefix of
// any stop sequence (and therefore might contain data that should not ultimately be returned to the
// client).
//
// If b contains a stop sequence, it modifies b to remove the stop sequence and all subsequent data.
func handleStopSequences(b *bytes.Buffer, stopSequences []string) (stop bool, endsWithStopPrefix bool) {
	s := b.String()
	for _, seq := range stopSequences {
		// Check for an exact or substring match.
		if i := strings.Index(s, seq); i != -1 {
			b.Truncate(i)
			return true, false
		}

		// Check if b ends with a prefix of the stop sequence.
		if len(seq) > 1 {
			for i := 1; i < len(seq); i++ {
				if strings.HasSuffix(s, seq[:i]) {
					return false, true
				}
			}
Michael Yang's avatar
Michael Yang committed
392
393
394
		}
	}

395
	return false, false
Michael Yang's avatar
Michael Yang committed
396
397
}

398
// marshalPrompt tokenizes prompt, appends it to the prior context tokens,
// truncates the result to fit the context window, and primes llm.embd,
// llm.last, and llm.cursor for the next prediction. The cursor is placed
// after the longest prefix shared with the previously evaluated tokens so
// cached evaluation state is reused.
func (llm *llama) marshalPrompt(ctx []int, prompt string) []C.llama_token {
	// NOTE(review): append may write into ctx's backing array if it has
	// spare capacity — confirm callers do not reuse ctx afterwards.
	tokens := append(ctx, llm.Encode(prompt)...)
	if llm.NumKeep < 0 {
		// negative NumKeep means "keep the whole prompt"
		llm.NumKeep = len(tokens)
	}

	cTokens := make([]C.llama_token, len(tokens))
	for i := range tokens {
		cTokens[i] = C.llama_token(tokens[i])
	}

	// min(llm.NumCtx - 4, llm.NumKeep)
	if llm.NumCtx-4 < llm.NumKeep {
		llm.NumKeep = llm.NumCtx - 4
	}

	if len(tokens) >= llm.NumCtx {
		// truncate input: keep the first NumKeep tokens and drop whole
		// numLeft-sized blocks from the middle so the tail fits
		numLeft := (llm.NumCtx - llm.NumKeep) / 2
		truncated := cTokens[:llm.NumKeep]
		erasedBlocks := (len(cTokens) - llm.NumKeep - numLeft - 1) / numLeft
		truncated = append(truncated, cTokens[llm.NumKeep+erasedBlocks*numLeft:]...)
		// NOTE(review): copy writes at most len(llm.last) tokens — confirm
		// llm.last is always NumCtx long when this branch is reached.
		copy(llm.last, cTokens[len(cTokens)-llm.NumCtx:])

		cTokens = truncated
		log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated))
	} else {
		// left-pad last with zero tokens so it always has NumCtx entries
		llm.last = make([]C.llama_token, llm.NumCtx-len(cTokens))
		llm.last = append(llm.last, cTokens...)
	}

	// find the longest common prefix with the previously evaluated tokens
	var i int
	for i = 0; i < len(llm.embd) && i < len(cTokens) && llm.embd[i] == cTokens[i]; i++ {
		// noop
	}

	llm.embd = cTokens
	if i == len(cTokens) {
		// evaluate at least one token to generate logits
		i--
	}

	llm.cursor = i

	log.Printf("prompt: num_past=%d cached=%v eval=%v", i, len(llm.embd[:i]), len(llm.embd[i:]))
	return cTokens
}
Michael Yang's avatar
Michael Yang committed
445

446
func (llm *llama) Encode(prompt string) []int {
Michael Yang's avatar
Michael Yang committed
447
448
	cPrompt := C.CString(prompt)
	defer C.free(unsafe.Pointer(cPrompt))
Michael Yang's avatar
Michael Yang committed
449

450
451
452
453
454
455
456
457
	cTokens := make([]C.llama_token, len(prompt)+1)
	if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(cTokens), C.int(len(cTokens)), true); n > 0 {
		tokens := make([]int, n)
		for i := range cTokens[:n] {
			tokens[i] = int(cTokens[i])
		}

		return tokens
Jeffrey Morgan's avatar
Jeffrey Morgan committed
458
459
460
461
462
	}

	return nil
}

463
func (llm *llama) Decode(tokens ...int) string {
Michael Yang's avatar
Michael Yang committed
464
465
	var sb strings.Builder
	for _, token := range tokens {
466
		sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, C.llama_token(token))))
Jeffrey Morgan's avatar
Jeffrey Morgan committed
467
468
	}

Michael Yang's avatar
Michael Yang committed
469
	return sb.String()
Jeffrey Morgan's avatar
Jeffrey Morgan committed
470
471
}

472
// next evaluates any pending tokens and samples one new token from the
// model, appending it to llm.embd and llm.last. It returns io.EOF when the
// end-of-sequence token is produced or when the model is being closed.
func (llm *llama) next() (C.llama_token, error) {
	llm.mu.Lock()
	defer llm.mu.Unlock()

	// context window full: keep the first NumKeep tokens plus the most
	// recent numLeft tokens, and re-evaluate from NumKeep onward
	if len(llm.embd) >= llm.NumCtx {
		numLeft := (llm.NumCtx - llm.NumKeep) / 2
		truncated := llm.embd[:llm.NumKeep]
		truncated = append(truncated, llm.embd[len(llm.embd)-numLeft:]...)

		llm.embd = truncated
		llm.cursor = llm.NumKeep
		log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d cursor=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated), llm.cursor)
	}

	// evaluate unprocessed tokens in batches of at most NumBatch
	for {
		if llm.gc {
			// Close() was called; abort
			return 0, io.EOF
		}

		if llm.cursor >= len(llm.embd) {
			// everything up to the end has been evaluated
			break
		}

		numEval := len(llm.embd) - llm.cursor
		if numEval > llm.NumBatch {
			numEval = llm.NumBatch
		}

		if retval := C.llama_eval(llm.ctx, unsafe.SliceData(llm.embd[llm.cursor:]), C.int(numEval), C.int(llm.cursor), C.int(llm.NumThread)); retval != 0 {
			return 0, fmt.Errorf("llama_eval: %d", retval)
		}

		llm.cursor += numEval
	}

	// marshal the Go-side sampling options for the C helper
	var sampleOpts C.struct_llama_sample_options
	sampleOpts.repeat_penalty = C.float(llm.RepeatPenalty)
	sampleOpts.frequency_penalty = C.float(llm.FrequencyPenalty)
	sampleOpts.presence_penalty = C.float(llm.PresencePenalty)
	sampleOpts.temperature = C.float(llm.Temperature)
	sampleOpts.top_k = C.int(llm.TopK)
	sampleOpts.top_p = C.float(llm.TopP)
	sampleOpts.tfs_z = C.float(llm.TFSZ)
	sampleOpts.typical_p = C.float(llm.TypicalP)
	sampleOpts.mirostat = C.int(llm.Mirostat)
	sampleOpts.mirostat_tau = C.float(llm.MirostatTau)
	sampleOpts.mirostat_eta = C.float(llm.MirostatEta)
	sampleOpts.penalize_newline = C.bool(llm.PenalizeNewline)

	numVocab := C.llama_n_vocab(llm.ctx)
	logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)

	// TODO: logit bias

	// build one sampling candidate per vocabulary entry from the logits
	candidates := make([]C.llama_token_data, numVocab)
	for i := range logits {
		candidates[i] = C.llama_token_data{
			id:    C.int(i),
			logit: logits[i],
			p:     0,
		}
	}

	// clamp the repetition-penalty window to the available history and to
	// the context size
	repeatLastN := llm.RepeatLastN
	if len(llm.last) < repeatLastN {
		repeatLastN = len(llm.last)
	}

	if llm.NumCtx < repeatLastN {
		repeatLastN = llm.NumCtx
	}

	lastN := llm.last[len(llm.last)-repeatLastN:]

	token := C.llama_sample(
		llm.ctx,
		unsafe.SliceData(candidates), C.size_t(len(candidates)),
		unsafe.SliceData(lastN), C.size_t(len(lastN)),
		&sampleOpts,
	)

	llm.last = append(llm.last, token)
	llm.embd = append(llm.embd, token)

	if token == C.llama_token_eos() {
		return 0, io.EOF
	}

	return token, nil
}
562

563
// Embedding evaluates input through the model and returns its embedding
// vector. The model must have been loaded with EmbeddingOnly enabled.
func (llm *llama) Embedding(input string) ([]float64, error) {
	if !llm.EmbeddingOnly {
		return nil, errors.New("llama: embedding not enabled")
	}

	tokens := llm.Encode(input)
	if tokens == nil {
		return nil, errors.New("llama: tokenize embedding")
	}

	cTokens := make([]C.llama_token, len(tokens))
	for i := range tokens {
		cTokens[i] = C.llama_token(tokens[i])
	}

	// evaluate the whole input starting at position 0
	retval := C.llama_eval(llm.ctx, unsafe.SliceData(cTokens), C.int(len(tokens)), 0, C.int(llm.NumThread))
	if retval != 0 {
		return nil, errors.New("llama: eval")
	}

	C.llama_print_timings(llm.ctx)

	n := C.llama_n_embd(llm.ctx)
	if n <= 0 {
		return nil, errors.New("llama: no embeddings generated")
	}
	cEmbeddings := unsafe.Slice(C.llama_get_embeddings(llm.ctx), n)

	// widen the C floats to float64 for the API response
	embeddings := make([]float64, len(cEmbeddings))
	for i, v := range cEmbeddings {
		embeddings[i] = float64(v)
	}
	return embeddings, nil
}