package llm

/*
#cgo CPPFLAGS: -O3 -Wall -Wextra -Wno-unused-function -Wno-unused-variable -DNDEBUG -DGGML_USE_K_QUANTS
#cgo CXXFLAGS: -std=gnu++11
#cgo darwin CPPFLAGS:  -DGGML_USE_ACCELERATE
#cgo darwin,arm64 CPPFLAGS: -DGGML_USE_METAL -DGGML_METAL_NDEBUG
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
#include <stdlib.h>
#include "llama.h"

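// llama_sample_options carries the sampling parameters filled in on the Go
// side and handed to llama_sample as a single struct.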
struct llama_sample_options
{
	float repeat_penalty;
	float frequency_penalty;
	float presence_penalty;
	float temperature;
	int32_t top_k;
	float top_p;
	float tfs_z;
	float typical_p;
	int mirostat;
	float mirostat_tau;
	float mirostat_eta;
	bool penalize_newline;
};

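// llama_sample applies the repetition, frequency, and presence penalties to the
// candidate tokens, then picks the next token: greedily when temperature <= 0,
// via mirostat v1 or v2 when enabled, or through the top-k, tail-free, typical,
// and top-p pipeline otherwise.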
llama_token llama_sample(
		struct llama_context *ctx,
		struct llama_token_data *candidates,
		size_t n_candidates,
		const llama_token *last_tokens,
		size_t n_last_tokens,
		struct llama_sample_options *opts)
{
	llama_token_data_array candidates_p = {
		candidates,
		n_candidates,
		false,
	};

	struct llama_token_data newline = candidates_p.data[llama_token_nl()];

	llama_sample_repetition_penalty(
		ctx, &candidates_p,
		last_tokens, n_last_tokens,
		opts->repeat_penalty);

	llama_sample_frequency_and_presence_penalties(
		ctx, &candidates_p,
		last_tokens, n_last_tokens,
		opts->frequency_penalty, opts->presence_penalty);

	if (!opts->penalize_newline) {
		candidates_p.data[llama_token_nl()] = newline;
	}

	if (opts->temperature <= 0) {
		return llama_sample_token_greedy(ctx, &candidates_p);
	}

	if (opts->mirostat == 1) {
		int mirostat_m = 100;
		float mirostat_mu = 2.0f * opts->mirostat_tau;
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token_mirostat(
			ctx, &candidates_p,
			opts->mirostat_tau, opts->mirostat_eta,
			mirostat_m, &mirostat_mu);
	} else if (opts->mirostat == 2) {
		float mirostat_mu = 2.0f * opts->mirostat_tau;
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token_mirostat_v2(
			ctx, &candidates_p,
			opts->mirostat_tau, opts->mirostat_eta,
			&mirostat_mu);
	} else {
		llama_sample_top_k(ctx, &candidates_p, opts->top_k, 1);
		llama_sample_tail_free(ctx, &candidates_p, opts->tfs_z, 1);
		llama_sample_typical(ctx, &candidates_p, opts->typical_p, 1);
		llama_sample_top_p(ctx, &candidates_p, opts->top_p, 1);
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token(ctx, &candidates_p);
	}
}
*/
import "C"

import (
	"bytes"
	"embed"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"strings"
	"sync"
	"unicode/utf8"
	"unsafe"

	"github.com/jmorganca/ollama/api"
)

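// Embed the Metal shader source so it ships with the binary.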
//go:embed ggml-metal.metal
var fs embed.FS

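// llama wraps a loaded llama.cpp model and context along with the generation
// state: recently sampled tokens, pending input, and the evaluation cursor.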
type llama struct {
	params *C.struct_llama_context_params
	model  *C.struct_llama_model
	ctx    *C.struct_llama_context

	last   []C.llama_token
	embd   []C.llama_token
	cursor int

	mu sync.Mutex
	gc bool

	api.Options
}

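// llamaHyperparameters describes the shape of a llama model: vocabulary size,
// embedding width, attention heads, layer count, and quantization file type.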
type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32
	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType
}

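// newLlama loads the model at the given path, optionally applies LoRA adapters,
// and returns a runner configured with opts.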
func newLlama(model string, adapters []string, opts api.Options) (*llama, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	llm := llama{Options: opts}

	C.llama_backend_init(C.bool(llm.UseNUMA))

	params := C.llama_context_default_params()
	params.seed = C.uint(llm.Seed)
	params.n_ctx = C.int(llm.NumCtx)
	params.n_batch = C.int(llm.NumBatch)
	params.n_gqa = C.int(llm.NumGQA)
	params.n_gpu_layers = C.int(llm.NumGPU)
	params.main_gpu = C.int(llm.MainGPU)
	params.low_vram = C.bool(llm.LowVRAM)
	params.f16_kv = C.bool(llm.F16KV)
	params.logits_all = C.bool(llm.LogitsAll)
	params.vocab_only = C.bool(llm.VocabOnly)
	params.use_mmap = C.bool(llm.UseMMap)
	params.use_mlock = C.bool(llm.UseMLock)
	params.embedding = C.bool(llm.EmbeddingOnly)
	params.rope_freq_base = C.float(llm.RopeFrequencyBase)
	params.rope_freq_scale = C.float(llm.RopeFrequencyScale)

	if len(adapters) > 0 && llm.UseMMap {
		log.Printf("must disable mmap to use lora adapters")
		params.use_mmap = C.bool(false)
	}

	llm.params = &params

	cModel := C.CString(model)
	defer C.free(unsafe.Pointer(cModel))

	llm.model = C.llama_load_model_from_file(cModel, params)
	if llm.model == nil {
		return nil, errors.New("failed to load model")
	}

	llm.ctx = C.llama_new_context_with_model(llm.model, params)
	if llm.ctx == nil {
		return nil, errors.New("failed to create context")
	}

	for _, adapter := range adapters {
		cAdapter := C.CString(adapter)
		defer C.free(unsafe.Pointer(cAdapter))

		if retval := C.llama_model_apply_lora_from_file(llm.model, cAdapter, nil, C.int(llm.NumThread)); retval != 0 {
			return nil, fmt.Errorf("failed to load adapter %s", adapter)
		}
	}

	// warm up the model
	bos := []C.llama_token{C.llama_token_bos()}
	C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
	C.llama_reset_timings(llm.ctx)

	return &llm, nil
}

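// Close signals any in-flight prediction to stop, prints llama.cpp timings, and
// frees the context and model.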
func (llm *llama) Close() {
	llm.gc = true

	llm.mu.Lock()
	defer llm.mu.Unlock()

	defer C.llama_free_model(llm.model)
	defer C.llama_free(llm.ctx)

	C.llama_print_timings(llm.ctx)
}

func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

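// errNeedMoreData indicates the generated text so far is only a prefix of a
// stop sequence, so generation must continue before a decision can be made.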
var errNeedMoreData = errors.New("need more data")

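// Predict generates a completion for prompt, continuing from the prior context
// tokens, and streams partial output through fn. The final callback carries
// Done=true along with the updated context and timing statistics.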
func (llm *llama) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
	C.llama_reset_timings(llm.ctx)

	llm.marshalPrompt(ctx, prompt)

	C.llama_set_rng_seed(llm.ctx, C.uint(llm.Seed))

	var b bytes.Buffer
	for {
		token, err := llm.next()
		if llm.gc {
			return nil
		} else if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return err
		}

		b.WriteString(llm.Decode(int(token)))

		if err := llm.checkStopConditions(b); err != nil {
			if errors.Is(err, io.EOF) {
				break
			} else if errors.Is(err, errNeedMoreData) {
				continue
			}

			return err
		}

		if utf8.Valid(b.Bytes()) || b.Len() >= utf8.UTFMax {
			fn(api.GenerateResponse{Response: b.String()})
			b.Reset()
		}
	}

	embd := make([]int, len(llm.embd))
	for i := range llm.embd {
		embd[i] = int(llm.embd[i])
	}

	timings := C.llama_get_timings(llm.ctx)
	fn(api.GenerateResponse{
		Done:               true,
		Context:            embd,
		SampleCount:        int(timings.n_sample),
		SampleDuration:     parseDurationMs(float64(timings.t_sample_ms)),
		PromptEvalCount:    int(timings.n_p_eval),
		PromptEvalDuration: parseDurationMs(float64(timings.t_p_eval_ms)),
		EvalCount:          int(timings.n_eval),
		EvalDuration:       parseDurationMs(float64(timings.t_eval_ms)),
	})

	return nil
}

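// checkStopConditions compares the buffered output against the configured stop
// sequences: io.EOF on an exact match, errNeedMoreData when the buffer is a
// prefix of a stop sequence, nil otherwise.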
func (llm *llama) checkStopConditions(b bytes.Buffer) error {
	for _, stopCondition := range llm.Stop {
		if stopCondition == strings.TrimSpace(b.String()) {
			return io.EOF
		} else if strings.HasPrefix(stopCondition, strings.TrimSpace(b.String())) {
			return errNeedMoreData
		}
	}

	return nil
}

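// marshalPrompt tokenizes the prompt, appends it to the previous context,
// truncates the result to fit the context window, and updates the cached token
// state so only unseen tokens are re-evaluated.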
func (llm *llama) marshalPrompt(ctx []int, prompt string) []C.llama_token {
	tokens := append(ctx, llm.Encode(prompt)...)
	if llm.NumKeep < 0 {
		llm.NumKeep = len(tokens)
	}

	cTokens := make([]C.llama_token, len(tokens))
	for i := range tokens {
		cTokens[i] = C.llama_token(tokens[i])
	}

	// min(llm.NumCtx - 4, llm.NumKeep)
	if llm.NumCtx-4 < llm.NumKeep {
		llm.NumKeep = llm.NumCtx - 4
	}

	if len(tokens) >= llm.NumCtx {
		// truncate input
		numLeft := (llm.NumCtx - llm.NumKeep) / 2
		truncated := cTokens[:llm.NumKeep]
		erasedBlocks := (len(cTokens) - llm.NumKeep - numLeft - 1) / numLeft
		truncated = append(truncated, cTokens[llm.NumKeep+erasedBlocks*numLeft:]...)
		copy(llm.last, cTokens[len(cTokens)-llm.NumCtx:])

		cTokens = truncated
		log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated))
	} else {
		llm.last = make([]C.llama_token, llm.NumCtx-len(cTokens))
		llm.last = append(llm.last, cTokens...)
	}

	var i int
	for i = 0; i < len(llm.embd) && i < len(cTokens) && llm.embd[i] == cTokens[i]; i++ {
		// noop
	}

	llm.embd = cTokens
	if i == len(cTokens) {
		// evaluate at least one token to generate logits
		i--
	}

	llm.cursor = i

	log.Printf("prompt: num_past=%d cached=%v eval=%v", i, len(llm.embd[:i]), len(llm.embd[i:]))
	return cTokens
}

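// Encode tokenizes prompt with the model's tokenizer, returning nil on failure.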
func (llm *llama) Encode(prompt string) []int {
	cPrompt := C.CString(prompt)
	defer C.free(unsafe.Pointer(cPrompt))

	cTokens := make([]C.llama_token, len(prompt)+1)
	if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(cTokens), C.int(len(cTokens)), true); n > 0 {
		tokens := make([]int, n)
		for i := range cTokens[:n] {
			tokens[i] = int(cTokens[i])
		}

		return tokens
	}

	return nil
}

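// Decode converts token ids back into their string representations.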
func (llm *llama) Decode(tokens ...int) string {
	var sb strings.Builder
	for _, token := range tokens {
		sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, C.llama_token(token))))
	}

	return sb.String()
}

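// next evaluates any pending input tokens and samples the next token. It
// returns io.EOF when the end-of-sequence token is produced or the runner is
// being closed.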
func (llm *llama) next() (C.llama_token, error) {
	llm.mu.Lock()
	defer llm.mu.Unlock()

	if len(llm.embd) >= llm.NumCtx {
		numLeft := (llm.NumCtx - llm.NumKeep) / 2
		truncated := llm.embd[:llm.NumKeep]
		truncated = append(truncated, llm.embd[len(llm.embd)-numLeft:]...)

		llm.embd = truncated
		llm.cursor = llm.NumKeep
		log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d cursor=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated), llm.cursor)
	}

	for {
		if llm.gc {
			return 0, io.EOF
		}

		if llm.cursor >= len(llm.embd) {
			break
		}

		numEval := len(llm.embd) - llm.cursor
		if numEval > llm.NumBatch {
			numEval = llm.NumBatch
		}

		if retval := C.llama_eval(llm.ctx, unsafe.SliceData(llm.embd[llm.cursor:]), C.int(numEval), C.int(llm.cursor), C.int(llm.NumThread)); retval != 0 {
			return 0, fmt.Errorf("llama_eval: %d", retval)
		}

		llm.cursor += numEval
	}

	var sampleOpts C.struct_llama_sample_options
	sampleOpts.repeat_penalty = C.float(llm.RepeatPenalty)
	sampleOpts.frequency_penalty = C.float(llm.FrequencyPenalty)
	sampleOpts.presence_penalty = C.float(llm.PresencePenalty)
	sampleOpts.temperature = C.float(llm.Temperature)
	sampleOpts.top_k = C.int(llm.TopK)
	sampleOpts.top_p = C.float(llm.TopP)
	sampleOpts.tfs_z = C.float(llm.TFSZ)
	sampleOpts.typical_p = C.float(llm.TypicalP)
	sampleOpts.mirostat = C.int(llm.Mirostat)
	sampleOpts.mirostat_tau = C.float(llm.MirostatTau)
	sampleOpts.mirostat_eta = C.float(llm.MirostatEta)
	sampleOpts.penalize_newline = C.bool(llm.PenalizeNewline)

	numVocab := C.llama_n_vocab(llm.ctx)
	logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)

	// TODO: logit bias

	candidates := make([]C.llama_token_data, numVocab)
	for i := range logits {
		candidates[i] = C.llama_token_data{
			id:    C.int(i),
			logit: logits[i],
			p:     0,
		}
	}

	repeatLastN := llm.RepeatLastN
	if len(llm.last) < repeatLastN {
		repeatLastN = len(llm.last)
	}

	if llm.NumCtx < repeatLastN {
		repeatLastN = llm.NumCtx
	}

	lastN := llm.last[len(llm.last)-repeatLastN:]

	token := C.llama_sample(
		llm.ctx,
		unsafe.SliceData(candidates), C.size_t(len(candidates)),
		unsafe.SliceData(lastN), C.size_t(len(lastN)),
		&sampleOpts,
	)

	llm.last = append(llm.last, token)
	llm.embd = append(llm.embd, token)

	if token == C.llama_token_eos() {
		return 0, io.EOF
	}

	return token, nil
}

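// Embedding tokenizes and evaluates input and returns the resulting embedding
// vector. The model must have been loaded with EmbeddingOnly set.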
func (llm *llama) Embedding(input string) ([]float64, error) {
	if !llm.EmbeddingOnly {
		return nil, errors.New("llama: embedding not enabled")
	}

	tokens := llm.Encode(input)
	if tokens == nil {
		return nil, errors.New("llama: tokenize embedding")
	}

	cTokens := make([]C.llama_token, len(tokens))
	for i := range tokens {
		cTokens[i] = C.llama_token(tokens[i])
	}

	retval := C.llama_eval(llm.ctx, unsafe.SliceData(cTokens), C.int(len(tokens)), 0, C.int(llm.NumThread))
	if retval != 0 {
		return nil, errors.New("llama: eval")
	}

	n := C.llama_n_embd(llm.ctx)
	if n <= 0 {
		return nil, errors.New("llama: no embeddings generated")
	}
	cEmbeddings := unsafe.Slice(C.llama_get_embeddings(llm.ctx), n)

	embeddings := make([]float64, len(cEmbeddings))
	for i, v := range cEmbeddings {
		embeddings[i] = float64(v)
	}
	return embeddings, nil
}