package llm

import (
	"bufio"
	"bytes"
	"context"
	"embed"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"math/rand"
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/format"
)

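// jsonGrammar is a GBNF grammar for the llama.cpp server's grammar-constrained
// sampling. Predict attaches it to a request when the caller asks for JSON
// output, forcing the model to emit a syntactically valid JSON object.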
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

//go:embed llama.cpp/*/build/*/bin/*
var llamaCppEmbed embed.FS

type ModelRunner struct {
	Path        string // path to the model runner executable
	Accelerated bool
}

func chooseRunners(workDir, runnerType string) []ModelRunner {
	buildPath := path.Join("llama.cpp", runnerType, "build")
	var runners []ModelRunner

	// set the runners based on the OS
	// IMPORTANT: the order of the runners in the array is the priority order
	switch runtime.GOOS {
	case "darwin":
		if runtime.GOARCH == "arm64" {
			runners = []ModelRunner{{Path: path.Join(buildPath, "metal", "bin", "ollama-runner")}}
		} else {
			runners = []ModelRunner{{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")}}
		}
	case "linux":
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cuda", "bin", "ollama-runner"), Accelerated: true},
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	case "windows":
		// TODO: select windows GPU runner here when available
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cuda", "bin", "Release", "ollama-runner.exe"), Accelerated: true},
			{Path: path.Join(buildPath, "cpu", "bin", "Release", "ollama-runner.exe")},
		}
	default:
		log.Printf("unknown OS, running on CPU: %s", runtime.GOOS)
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	}

	runnerAvailable := false // if no runner files are found in the embed, this flag will cause a fast fail
	for _, r := range runners {
		// find all the files in the runner's bin directory
		files, err := fs.Glob(llamaCppEmbed, path.Join(path.Dir(r.Path), "*"))
		if err != nil {
			// this is expected, ollama may be compiled without all runners packed in
			log.Printf("%s runner not found: %v", r.Path, err)
			continue
		}

		for _, f := range files {
			runnerAvailable = true

			srcFile, err := llamaCppEmbed.Open(f)
			if err != nil {
				log.Fatalf("read llama runner %s: %v", f, err)
			}
			defer srcFile.Close()

			// create the directory in case it does not exist; filepath.Dir() converts the file path to the OS's format
			destPath := filepath.Join(workDir, filepath.Dir(f))
			if err := os.MkdirAll(destPath, 0o755); err != nil {
				log.Fatalf("create runner temp dir %s: %v", filepath.Dir(f), err)
			}

			// create the path to the destination file; filepath.Base() converts the file path to the OS's format
			destFile := filepath.Join(destPath, filepath.Base(f))

			_, err = os.Stat(destFile)
			switch {
			case errors.Is(err, os.ErrNotExist):
				destFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
				if err != nil {
					log.Fatalf("write llama runner %s: %v", f, err)
				}
				defer destFile.Close()

				if _, err := io.Copy(destFile, srcFile); err != nil {
					log.Fatalf("copy llama runner %s: %v", f, err)
				}
			case err != nil:
				log.Fatalf("stat llama runner %s: %v", f, err)
			}
		}
	}

	if !runnerAvailable {
		log.Fatalf("%s runner not found", runnerType)
	}

	// return the runners to try in priority order
	localRunnersByPriority := []ModelRunner{}
	for _, r := range runners {
		// clean the ModelRunner paths so that they match the OS we are running on
		localRunnersByPriority = append(localRunnersByPriority, ModelRunner{
			Path:        filepath.Clean(path.Join(workDir, r.Path)),
			Accelerated: r.Accelerated,
		})
	}

	return localRunnersByPriority
}
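
// For example, with a hypothetical runnerType of "gguf" on Linux, the CUDA
// runner is embedded at llama.cpp/gguf/build/cuda/bin/ollama-runner and is
// extracted to the same relative path under workDir, which is what the
// returned ModelRunner's Path points at.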

type llamaModel struct {
	hyperparameters llamaHyperparameters
}

func (llm *llamaModel) ModelFamily() string {
	return "llama"
}

// llamaModelType infers a human-readable parameter size from the layer count.
func llamaModelType(numLayer uint32) string {
	switch numLayer {
	case 26:
		return "3B"
	case 32:
		return "7B"
	case 40:
		return "13B"
	case 48:
		return "34B"
	case 60:
		return "30B"
	case 80:
		return "65B"
	default:
		return "unknown"
	}
}
185

func (llm *llamaModel) ModelType() string {
	return llamaModelType(llm.hyperparameters.NumLayer)
}

func (llm *llamaModel) FileType() string {
	return fileType(llm.hyperparameters.FileType)
}

func (llm *llamaModel) NumLayers() int64 {
	return int64(llm.hyperparameters.NumLayer)
}

type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType uint32
}

type Running struct {
	Port          int
	Cmd           *exec.Cmd
	Cancel        context.CancelFunc
	exitOnce      sync.Once
	exitCh        chan error // channel to receive the exit status of the subprocess
	*StatusWriter            // captures error messages from the llama runner process
}

type llama struct {
	api.Options
	Running
}

var (
	errNvidiaSMI     = errors.New("warning: gpu support may not be enabled, check that you have installed GPU drivers: nvidia-smi command failed")
	errAvailableVRAM = errors.New("not enough VRAM available, falling back to CPU only")
)

// CheckVRAM returns the free VRAM in bytes on Linux and Windows machines with NVIDIA GPUs
func CheckVRAM() (int64, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
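	// Example output (illustrative values), one free-memory value in MiB per GPU:
	//   8113
	//   4035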
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNvidiaSMI
	}

	var freeMiB int64
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "[Insufficient Permissions]") {
			return 0, fmt.Errorf("GPU support may not be enabled, check that you have installed GPU drivers and have the necessary permissions to run nvidia-smi")
		}

		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}

		freeMiB += vram
	}

	freeBytes := freeMiB * 1024 * 1024
	if freeBytes < 2*format.GigaByte {
		log.Printf("less than 2 GB VRAM available")
		return 0, errAvailableVRAM
	}

	return freeBytes, nil
}

// NumGPU returns the number of model layers to offload to the GPU.
func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
	if opts.NumGPU != -1 {
		return opts.NumGPU
	}
	if runtime.GOOS == "linux" || runtime.GOOS == "windows" {
		freeBytes, err := CheckVRAM()
		if err != nil {
			if !errors.Is(err, errNvidiaSMI) {
				log.Print(err.Error())
			}
			// nvidia driver not installed or no nvidia GPU found
			return 0
		}
		// Estimate bytes per layer by dividing the model file size by the
		// model's layer count. Both the weights and the kv cache have to fit
		// in VRAM, so treat this as a rough lower bound per layer.
		bytesPerLayer := fileSizeBytes / numLayer

		// Offload at most 75% of the absolute maximum number of layers that
		// fit in available VRAM; offloading too many layers can cause OOM errors.
		layers := int(freeBytes/bytesPerLayer) * 3 / 4
		log.Printf("%d MiB VRAM available, loading up to %d GPU layers", freeBytes/(1024*1024), layers)

		return layers
	}

	// default to enable metal on macOS
	return 1
}
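
// Worked example with illustrative numbers: a 7 GiB model file with 32 layers
// gives bytesPerLayer of roughly 224 MiB. With 8 GiB of free VRAM the absolute
// maximum is 8192/224, about 36 layers, and the 75% margin loads 36*3/4 = 27
// GPU layers.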

// StatusWriter is a writer that captures error messages from the llama runner process
type StatusWriter struct {
	ErrCh      chan error
	LastErrMsg string
}

func NewStatusWriter() *StatusWriter {
	return &StatusWriter{
		ErrCh: make(chan error, 1),
	}
}

func (w *StatusWriter) Write(b []byte) (int, error) {
	var errMsg string
	if _, after, ok := bytes.Cut(b, []byte("error:")); ok {
		errMsg = string(bytes.TrimSpace(after))
	} else if _, after, ok := bytes.Cut(b, []byte("CUDA error")); ok {
		errMsg = string(bytes.TrimSpace(after))
	}

	if errMsg != "" {
		w.LastErrMsg = errMsg
		w.ErrCh <- fmt.Errorf("llama runner: %s", errMsg)
	}

	return os.Stderr.Write(b)
}

func newLlama(model string, adapters []string, runners []ModelRunner, numLayers int64, opts api.Options) (*llama, error) {
	fileInfo, err := os.Stat(model)
	if err != nil {
		return nil, err
	}

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	numGPU := NumGPU(numLayers, fileInfo.Size(), opts)
	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--n-gpu-layers", fmt.Sprintf("%d", numGPU),
		"--embedding",
	}
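
	// The assembled invocation ends up looking something like this
	// (illustrative values; --port is appended per attempt below):
	//   ollama-runner --model /path/to/model.gguf --ctx-size 2048
	//     --batch-size 512 --n-gpu-layers 27 --embedding --port 51234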

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if opts.RopeFrequencyBase > 0 {
		params = append(params, "--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase))
	}

	if opts.RopeFrequencyScale > 0 {
		params = append(params, "--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale))
	}

	if opts.NumGQA > 0 {
		params = append(params, "--gqa", fmt.Sprintf("%d", opts.NumGQA))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}
	if opts.UseMLock {
		params = append(params, "--mlock")
	}
	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}
	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	var runnerErr error

	// start the llama.cpp server with a retry in case the port is already in use
	for _, runner := range runners {
		if runner.Accelerated && numGPU == 0 {
			log.Printf("skipping accelerated runner because num_gpu=0")
			continue
		}

		if _, err := os.Stat(runner.Path); err != nil {
			log.Printf("llama runner not found: %v", err)
			continue
		}

		port := rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		ctx, cancel := context.WithCancel(context.Background())
		cmd := exec.CommandContext(
			ctx,
			runner.Path,
			append(params, "--port", strconv.Itoa(port))...,
		)

		var libraryPaths []string
		if libraryPath, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok {
			libraryPaths = append(libraryPaths, libraryPath)
		}

		libraryPaths = append(libraryPaths, filepath.Dir(runner.Path))

		cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", strings.Join(libraryPaths, ":")))
		cmd.Stdout = os.Stderr
		statusWriter := NewStatusWriter()
		cmd.Stderr = statusWriter

		// keep the status writer on the handle so Close and Predict can report
		// the runner's last captured error message
		llm := &llama{Options: opts, Running: Running{Port: port, Cmd: cmd, Cancel: cancel, exitCh: make(chan error), StatusWriter: statusWriter}}
		log.Print("starting llama runner")
		if err := llm.Cmd.Start(); err != nil {
			log.Printf("error starting the external llama runner: %v", err)
			continue
		}

		// monitor the llama runner process and signal when it exits
		go func() {
			err := llm.Cmd.Wait()
			// default to printing the exit message of the command process; it will probably just say 'exit status 1'
			errMsg := err.Error()
			// try to set a better error message if llama runner logs captured an error
			if statusWriter.LastErrMsg != "" {
				errMsg = statusWriter.LastErrMsg
			}
			log.Println(errMsg)
			// llm.Cmd.Wait() can only be called once, use this exit channel to signal that the process has exited
			llm.exitOnce.Do(func() {
				close(llm.exitCh)
			})
		}()

		if err := waitForServer(llm); err != nil {
			log.Printf("error starting llama runner: %v", err)
			llm.Close()

			// default the runnerErr to the error returned by the most recent llama runner process
			runnerErr = err

			// capture the error directly from the runner process, if any
			select {
			case runnerErr = <-statusWriter.ErrCh:
			default:
				// the runner process probably timed out
			}

			// try again
			continue
		}

		// server started successfully
		return llm, nil
	}

	if runnerErr != nil {
		// this is the error returned from the llama runner process that failed most recently
		return nil, runnerErr
	}

	return nil, fmt.Errorf("failed to start a llama runner")
}

func waitForServer(llm *llama) error {
	start := time.Now()
	expiresAt := time.Now().Add(3 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	log.Print("waiting for llama runner to start responding")
	for {
		select {
		case <-llm.exitCh:
			// failed to start subprocess
			return fmt.Errorf("llama runner process has terminated")
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				return fmt.Errorf("timed out waiting for llama runner to start")
			}

			if err := llm.Ping(context.Background()); err == nil {
				// success
				log.Printf("llama runner started in %f seconds", time.Since(start).Seconds())
				return nil
			}
		}
	}
}

func (llm *llama) Close() {
	// signal the sub-process to terminate
	llm.Cancel()

	// wait for the command to exit to prevent race conditions with the next run
	<-llm.exitCh

	if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
		log.Printf("llama runner stopped with error: %v", llm.StatusWriter.LastErrMsg)
	} else {
		log.Print("llama runner stopped successfully")
	}
}

func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

type prediction struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

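// bufio.Scanner's default buffer tops out at 64 KiB; a single streamed
// prediction event can be larger, so Predict raises the limit to maxBufferSize.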
const maxBufferSize = 512 * format.KiloByte

type PredictRequest struct {
	Model            string
	Prompt           string
	Format           string
	CheckpointStart  time.Time
	CheckpointLoaded time.Time
}

type PredictResponse struct {
	Model              string
	CreatedAt          time.Time
	TotalDuration      time.Duration
	LoadDuration       time.Duration
	Content            string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
	Context            []int
}

func (llm *llama) Predict(ctx context.Context, predict PredictRequest, fn func(PredictResponse)) error {
	request := map[string]any{
		"prompt":            predict.Prompt,
		"stream":            true,
		"n_predict":         llm.NumPredict,
		"n_keep":            llm.NumKeep,
		"main_gpu":          llm.MainGPU,
		"temperature":       llm.Temperature,
		"top_k":             llm.TopK,
		"top_p":             llm.TopP,
		"tfs_z":             llm.TFSZ,
		"typical_p":         llm.TypicalP,
		"repeat_last_n":     llm.RepeatLastN,
		"repeat_penalty":    llm.RepeatPenalty,
		"presence_penalty":  llm.PresencePenalty,
		"frequency_penalty": llm.FrequencyPenalty,
		"mirostat":          llm.Mirostat,
		"mirostat_tau":      llm.MirostatTau,
		"mirostat_eta":      llm.MirostatEta,
		"penalize_nl":       llm.PenalizeNewline,
		"seed":              llm.Seed,
		"stop":              llm.Stop,
	}

	if predict.Format == "json" {
		request["grammar"] = jsonGrammar
	}

	// Marshal the request without escaping HTML so special characters in the
	// prompt reach the runner unchanged.
586
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(resp.Body)
	// increase the buffer size to avoid running out of space
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)
	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

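			// Each streamed event is an SSE-style line; an illustrative payload:
			//   data: {"content":" blue","stop":false}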
			if evt, ok := bytes.CutPrefix(line, []byte("data: ")); ok {
				var p prediction
				if err := json.Unmarshal(evt, &p); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				if p.Content != "" {
					fn(PredictResponse{
						Model:     predict.Model,
						CreatedAt: time.Now().UTC(),
						Content:   p.Content,
					})
				}

				if p.Stop {
					fn(PredictResponse{
						Model:         predict.Model,
						CreatedAt:     time.Now().UTC(),
						TotalDuration: time.Since(predict.CheckpointStart),

						Done:               true,
						PromptEvalCount:    p.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
						EvalCount:          p.Timings.PredictedN,
						EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
					})
					return nil
				}
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			// this means the llama runner subprocess crashed
			llm.Close()
			if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
				return fmt.Errorf("llama runner exited: %v", llm.StatusWriter.LastErrMsg)
			}
			return fmt.Errorf("llama runner exited, you may not have enough available memory to run this model")
		}
		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}
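
// A sketch of a streaming call (hypothetical caller, not part of this file):
//
//	err := llm.Predict(ctx, PredictRequest{Model: "llama2", Prompt: "Why is the sky blue?"}, func(r PredictResponse) {
//		fmt.Print(r.Content) // partial content arrives here; Done marks the final event
//	})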

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (llm *llama) Encode(ctx context.Context, prompt string) ([]int, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/tokenize", llm.Port)
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}
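
// A round trip through the runner's tokenizer (sketch, error handling elided):
//
//	tokens, _ := llm.Encode(ctx, "Why is the sky blue?")
//	text, _ := llm.Decode(ctx, tokens) // text approximates the original prompt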

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (llm *llama) Decode(ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/detokenize", llm.Port)
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal encode response: %w", err)
	}

	return decoded.Content, nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (llm *llama) Embedding(ctx context.Context, input string) ([]float64, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/embedding", llm.Port)
	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("POST embedding: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

// Ping checks that the server subprocess is still running and responding to requests
func (llm *llama) Ping(ctx context.Context) error {
	// use the caller's context so request cancellation also cancels the health check
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, fmt.Sprintf("http://127.0.0.1:%d", llm.Port), nil)
	if err != nil {
		return fmt.Errorf("ping request: %w", err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("ping resp: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected ping status: %s", resp.Status)
	}
	return nil
}