package llm

import (
	"bufio"
	"bytes"
	"context"
	"embed"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"math/rand"
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/format"
)

const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
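
// Note: the grammar above is llama.cpp's GBNF grammar for JSON output. Predict
// attaches it to a completion request when the caller asks for JSON, along the
// lines of this sketch (using the request map built later in this file):
//
//	if predict.Format == "json" {
//		request["grammar"] = jsonGrammar // constrain sampling to valid JSON
//	}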

//go:embed llama.cpp/*/build/*/bin/*
var llamaCppEmbed embed.FS

type ModelRunner struct {
	Path        string // path to the model runner executable
	Accelerated bool
}

func chooseRunners(workDir string) []ModelRunner {
	buildPath := path.Join("llama.cpp", "gguf", "build")
	var runners []ModelRunner

	// set the runners based on the OS
	// IMPORTANT: the order of the runners in the array is the priority order
	switch runtime.GOOS {
	case "darwin":
		if runtime.GOARCH == "arm64" {
			runners = []ModelRunner{{Path: path.Join(buildPath, "metal", "bin", "ollama-runner")}}
		} else {
			runners = []ModelRunner{{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")}}
		}
	case "linux":
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cuda", "bin", "ollama-runner"), Accelerated: true},
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	case "windows":
		// TODO: select windows GPU runner here when available
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cuda", "bin", "Release", "ollama-runner.exe"), Accelerated: true},
			{Path: path.Join(buildPath, "cpu", "bin", "Release", "ollama-runner.exe")},
		}
	default:
		log.Printf("unknown OS, running on CPU: %s", runtime.GOOS)
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	}

	runnerAvailable := false // if no runner files are found in the embed, this flag will cause a fast fail
	for _, r := range runners {
		// find all the files in the runner's bin directory
		files, err := fs.Glob(llamaCppEmbed, path.Join(path.Dir(r.Path), "*"))
		if err != nil {
			// this is expected, ollama may be compiled without all runners packed in
			log.Printf("%s runner not found: %v", r.Path, err)
			continue
		}

		for _, f := range files {
			runnerAvailable = true

			srcFile, err := llamaCppEmbed.Open(f)
			if err != nil {
				log.Fatalf("read llama runner %s: %v", f, err)
			}
			defer srcFile.Close()

			// create the directory in case it does not exist; filepath.Dir() converts the file path to the OS's format
			destPath := filepath.Join(workDir, filepath.Dir(f))
			if err := os.MkdirAll(destPath, 0o755); err != nil {
				log.Fatalf("create runner temp dir %s: %v", filepath.Dir(f), err)
			}

			// create the path to the destination file; filepath.Base() converts the file path to the OS's format
			destFile := filepath.Join(destPath, filepath.Base(f))

			_, err = os.Stat(destFile)
			switch {
			case errors.Is(err, os.ErrNotExist):
				destFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
				if err != nil {
					log.Fatalf("write llama runner %s: %v", f, err)
				}
				defer destFile.Close()

				if _, err := io.Copy(destFile, srcFile); err != nil {
					log.Fatalf("copy llama runner %s: %v", f, err)
				}
			case err != nil:
				log.Fatalf("stat llama runner %s: %v", f, err)
			}
		}
	}
	if !runnerAvailable {
		log.Fatalf("gguf runner not found")
	}

	// return the runners to try in priority order
	localRunnersByPriority := []ModelRunner{}
	for _, r := range runners {
		// clean the ModelRunner paths so that they match the OS we are running on
		localRunnersByPriority = append(localRunnersByPriority, ModelRunner{
			Path:        filepath.Clean(path.Join(workDir, r.Path)),
			Accelerated: r.Accelerated,
		})
	}

	return localRunnersByPriority
}
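
// exampleChooseRunners is a hypothetical usage sketch, not called anywhere in
// this file: chooseRunners extracts the embedded runner binaries into workDir
// and returns the candidate executables in priority order, GPU builds first.
func exampleChooseRunners(workDir string) {
	for i, r := range chooseRunners(workDir) {
		log.Printf("runner %d: path=%s accelerated=%v", i, r.Path, r.Accelerated)
	}
}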

type llamaModel struct {
	hyperparameters llamaHyperparameters
}

func (llm *llamaModel) ModelFamily() string {
	return "llama"
}

func llamaModelType(numLayer uint32) string {
	switch numLayer {
	case 26:
		return "3B"
	case 32:
		return "7B"
	case 40:
		return "13B"
	case 48:
		return "34B"
	case 60:
		return "30B"
	case 80:
		return "65B"
	default:
		return "unknown"
	}
}

func (llm *llamaModel) ModelType() string {
	return llamaModelType(llm.hyperparameters.NumLayer)
}

func (llm *llamaModel) FileType() string {
	return fileType(llm.hyperparameters.FileType)
}

func (llm *llamaModel) NumLayers() int64 {
	return int64(llm.hyperparameters.NumLayer)
}

type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType uint32
}

type Running struct {
	Port          int
	Cmd           *exec.Cmd
	Cancel        context.CancelFunc
	exitOnce      sync.Once
	exitCh        chan error // channel to receive the exit status of the subprocess
	*StatusWriter            // captures error messages from the llama runner process
}

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type llama struct {
	api.Options
	ImageData []ImageData
	Running
}

var (
	errNvidiaSMI     = errors.New("warning: gpu support may not be enabled, check that you have installed GPU drivers: nvidia-smi command failed")
	errAvailableVRAM = errors.New("not enough VRAM available, falling back to CPU only")
)

// CheckVRAM returns the total free VRAM in bytes on Linux and Windows machines with NVIDIA GPUs
func CheckVRAM() (int64, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
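	// The query prints one free-memory value per GPU in MiB, one per line,
	// e.g. on a hypothetical two-GPU machine:
	//
	//	8112
	//	16380
	//
	// The scanner below sums these, so multi-GPU systems report total free VRAM.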
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNvidiaSMI
	}

	var freeMiB int64
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "[Insufficient Permissions]") {
			return 0, fmt.Errorf("GPU support may not be enabled, check that you have installed GPU drivers and have the necessary permissions to run nvidia-smi")
		}

		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}

		freeMiB += vram
	}

	freeBytes := freeMiB * 1024 * 1024
	if freeBytes < 2*format.GigaByte {
		log.Printf("less than 2 GB VRAM available")
		return 0, errAvailableVRAM
	}

	return freeBytes, nil
}

func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
	if opts.NumGPU != -1 {
		return opts.NumGPU
	}
	if runtime.GOOS == "linux" || runtime.GOOS == "windows" {
		freeBytes, err := CheckVRAM()
		if err != nil {
			if !errors.Is(err, errNvidiaSMI) {
				log.Print(err.Error())
			}
			// nvidia driver not installed or no nvidia GPU found
			return 0
		}

		/*
		 Calculate bytes per layer. This is roughly the size of the model file divided by the number of layers.
		 Both the model weights and the kv cache can be stored in VRAM; to budget for kv cache storage,
		 add two additional layers to the layer count read from the model file.
		*/
		bytesPerLayer := fileSizeBytes / numLayer

		// 75% of the absolute max number of layers we can fit in available VRAM, off-loading too many layers to the GPU can cause OOM errors
		layers := int(freeBytes/bytesPerLayer) * 3 / 4
		log.Printf("%d MB VRAM available, loading up to %d GPU layers", freeBytes/(1024*1024), layers)

		return layers
	}
	// default to enable metal on macOS
	return 1
}
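
// exampleNumGPU is a hypothetical worked example of the calculation above: a
// ~3.8 GB model with 32 layers gives bytesPerLayer ≈ 119 MB, so 8 GB of free
// VRAM fits ~67 layers and the 75% cap offloads up to 50 of them.
func exampleNumGPU() {
	opts := api.DefaultOptions() // assumes the default NumGPU of -1 (auto-detect)
	layers := NumGPU(32, 3_800_000_000, opts)
	log.Printf("offloading up to %d layers", layers)
}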

// StatusWriter is a writer that captures error messages from the llama runner process
type StatusWriter struct {
	ErrCh      chan error
	LastErrMsg string
}

func NewStatusWriter() *StatusWriter {
	return &StatusWriter{
		ErrCh: make(chan error, 1),
	}
}

func (w *StatusWriter) Write(b []byte) (int, error) {
	var errMsg string
	if _, after, ok := bytes.Cut(b, []byte("error:")); ok {
		errMsg = string(bytes.TrimSpace(after))
	} else if _, after, ok := bytes.Cut(b, []byte("CUDA error")); ok {
		errMsg = string(bytes.TrimSpace(after))
	}

	if errMsg != "" {
		w.LastErrMsg = errMsg
		w.ErrCh <- fmt.Errorf("llama runner: %s", errMsg)
	}

	return os.Stderr.Write(b)
}
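
// exampleStatusWriter is a minimal sketch of the capture behavior above: a
// stderr line containing "error:" is trimmed, retained in LastErrMsg, and
// surfaced on ErrCh, while the raw bytes still pass through to os.Stderr.
func exampleStatusWriter() {
	w := NewStatusWriter()
	w.Write([]byte("ggml error: failed to allocate buffer"))
	select {
	case err := <-w.ErrCh:
		log.Printf("captured: %v", err) // llama runner: failed to allocate buffer
	default:
	}
}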

func newLlama(model string, adapters, projectors []string, runners []ModelRunner, numLayers int64, opts api.Options) (*llama, error) {
	fileInfo, err := os.Stat(model)
	if err != nil {
		return nil, err
	}

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	numGPU := NumGPU(numLayers, fileInfo.Size(), opts)
	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--n-gpu-layers", fmt.Sprintf("%d", numGPU),
		"--embedding",
		"--parallel", "2",
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if opts.RopeFrequencyBase > 0 {
		params = append(params, "--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase))
	}

	if opts.RopeFrequencyScale > 0 {
		params = append(params, "--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale))
	}

	if opts.NumGQA > 0 {
		params = append(params, "--gqa", fmt.Sprintf("%d", opts.NumGQA))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}
	if opts.UseMLock {
		params = append(params, "--mlock")
	}
	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}
	if opts.UseNUMA {
		params = append(params, "--numa")
	}
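
	// At this point the argument list looks roughly like (hypothetical values):
	//
	//	--model /path/model.gguf --ctx-size 2048 --batch-size 512 \
	//	--n-gpu-layers 35 --embedding --parallel 2
	//
	// with --port appended per attempt in the retry loop below.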

	var runnerErr error

	// start the llama.cpp server with a retry in case the port is already in use
	for _, runner := range runners {
		if runner.Accelerated && numGPU == 0 {
			log.Printf("skipping accelerated runner because num_gpu=0")
			continue
		}

		if _, err := os.Stat(runner.Path); err != nil {
			log.Printf("llama runner not found: %v", err)
			continue
		}

		port := rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		params := append(params, "--port", strconv.Itoa(port))

		ctx, cancel := context.WithCancel(context.Background())
		cmd := exec.CommandContext(
			ctx,
			runner.Path,
			params...,
		)

		var libraryPaths []string
		if libraryPath, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok {
			libraryPaths = append(libraryPaths, libraryPath)
		}

		libraryPaths = append(libraryPaths, filepath.Dir(runner.Path))

		cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", strings.Join(libraryPaths, ":")))
		cmd.Stdout = os.Stderr
		statusWriter := NewStatusWriter()
		cmd.Stderr = statusWriter

		// keep a handle on the status writer so Close and Predict can surface the runner's last error
		llm := &llama{Options: opts, Running: Running{Port: port, Cmd: cmd, Cancel: cancel, exitCh: make(chan error), StatusWriter: statusWriter}}

		log.Print("starting llama runner")
		if err := llm.Cmd.Start(); err != nil {
			log.Printf("error starting the external llama runner: %v", err)
			continue
		}

		// monitor the llama runner process and signal when it exits
		go func() {
			err := llm.Cmd.Wait()
			// default to printing the exit message of the command process, it will probably just say 'exit status 1'
			errMsg := err.Error()
			// try to set a better error message if llama runner logs captured an error
			if statusWriter.LastErrMsg != "" {
				errMsg = statusWriter.LastErrMsg
			}
			log.Println(errMsg)
			// llm.Cmd.Wait() can only be called once, use this exit channel to signal that the process has exited
			llm.exitOnce.Do(func() {
				close(llm.exitCh)
			})
		}()

		if err := waitForServer(llm); err != nil {
			log.Printf("error starting llama runner: %v", err)
			llm.Close()

			// default the runnerErr to the error returned by the most recent llama runner process
			runnerErr = err

			// capture the error directly from the runner process, if any
			select {
			case runnerErr = <-statusWriter.ErrCh:
			default:
				// the runner process probably timed out
			}

			// try again
			continue
		}

		// server started successfully
		return llm, nil
	}

	if runnerErr != nil {
		// this is the error returned from the llama runner process that failed most recently
		return nil, runnerErr
	}

	return nil, fmt.Errorf("failed to start a llama runner")
}

func waitForServer(llm *llama) error {
	start := time.Now()
	expiresAt := time.Now().Add(3 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	log.Print("waiting for llama runner to start responding")
	for {
		select {
		case <-llm.exitCh:
			// failed to start subprocess
			return fmt.Errorf("llama runner process has terminated")
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				return fmt.Errorf("timed out waiting for llama runner to start")
			}

			if err := llm.Ping(context.Background()); err == nil {
				// success
				log.Printf("llama runner started in %f seconds", time.Since(start).Seconds())
				return nil
			}
		}
	}
}

func (llm *llama) Close() {
	// signal the sub-process to terminate
	llm.Cancel()

	// wait for the command to exit to prevent race conditions with the next run
	<-llm.exitCh

	if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
		log.Printf("llama runner stopped with error: %v", llm.StatusWriter.LastErrMsg)
	} else {
		log.Print("llama runner stopped successfully")
	}
}

func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

type prediction struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

const maxBufferSize = 512 * format.KiloByte
const maxRetries = 6

type PredictOpts struct {
	Prompt string
	Format string
	Images []api.ImageData
}

type PredictResult struct {
	Content            string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

// isRetryable checks if the line matches a condition that can be retried
func isRetryable(line []byte) bool {
	return bytes.Contains(line, []byte("slot unavailable"))
}

func (llm *llama) Predict(ctx context.Context, predict PredictOpts, fn func(PredictResult)) error {
	imageData := llm.ImageData
	if len(predict.Images) > 0 {
		for cnt, i := range predict.Images {
			imageData = append(imageData, ImageData{Data: i, ID: cnt})
		}
	}
	log.Printf("loaded %d images", len(imageData))

	request := map[string]any{
		"prompt":            predict.Prompt,
		"stream":            true,
		"n_predict":         llm.NumPredict,
		"n_keep":            llm.NumKeep,
		"main_gpu":          llm.MainGPU,
		"temperature":       llm.Temperature,
		"top_k":             llm.TopK,
		"top_p":             llm.TopP,
		"tfs_z":             llm.TFSZ,
		"typical_p":         llm.TypicalP,
		"repeat_last_n":     llm.RepeatLastN,
		"repeat_penalty":    llm.RepeatPenalty,
		"presence_penalty":  llm.PresencePenalty,
		"frequency_penalty": llm.FrequencyPenalty,
		"mirostat":          llm.Mirostat,
		"mirostat_tau":      llm.MirostatTau,
		"mirostat_eta":      llm.MirostatEta,
		"penalize_nl":       llm.PenalizeNewline,
		"seed":              llm.Seed,
		"stop":              llm.Stop,
		"image_data":        imageData,
	}

	if predict.Format == "json" {
		request["grammar"] = jsonGrammar
	}

	retryDelay := 100 * time.Microsecond
	for retries := 0; retries < maxRetries; retries++ {
		if retries > 0 {
			time.Sleep(retryDelay) // wait before retrying
			retryDelay *= 2        // exponential backoff
		}
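
		// With maxRetries == 6 the waits are 100µs, 200µs, 400µs, 800µs and
		// 1.6ms: short, since a "slot unavailable" response usually clears
		// almost immediately.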

		// Marshal the request without escaping HTML special characters in the prompt.
		buffer := &bytes.Buffer{}
		enc := json.NewEncoder(buffer)
		enc.SetEscapeHTML(false)

		if err := enc.Encode(request); err != nil {
			return fmt.Errorf("failed to marshal data: %v", err)
		}

		endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port)
		req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
		if err != nil {
			return fmt.Errorf("error creating POST request: %v", err)
		}
		req.Header.Set("Content-Type", "application/json")

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return fmt.Errorf("POST predict: %v", err)
		}
		defer resp.Body.Close()

		if resp.StatusCode >= 400 {
			bodyBytes, err := io.ReadAll(resp.Body)
			if err != nil {
				return fmt.Errorf("failed reading llm error response: %w", err)
			}
			log.Printf("llm predict error: %s", bodyBytes)
			return fmt.Errorf("%s", bodyBytes)
		}

		scanner := bufio.NewScanner(resp.Body)
		// increase the buffer size to avoid running out of space
		buf := make([]byte, 0, maxBufferSize)
		scanner.Buffer(buf, maxBufferSize)

		retryNeeded := false
		for scanner.Scan() {
			select {
			case <-ctx.Done():
				// This handles the request cancellation
				return ctx.Err()
			default:
				line := scanner.Bytes()
				if len(line) == 0 {
					continue
				}

				if isRetryable(line) {
					retryNeeded = true
					break
				}

				evt, ok := bytes.CutPrefix(line, []byte("data: "))
				if !ok {
					return fmt.Errorf("error parsing llm response stream: %s", line)
				}

				var p prediction
				if err := json.Unmarshal(evt, &p); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				if p.Content != "" {
					fn(PredictResult{
						Content: p.Content,
					})
				}

				if p.Stop {
					fn(PredictResult{
						Done:               true,
						PromptEvalCount:    p.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
						EvalCount:          p.Timings.PredictedN,
						EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
					})
					return nil
				}
			}
		}

		if err := scanner.Err(); err != nil {
			if strings.Contains(err.Error(), "unexpected EOF") {
				// this means the llama runner subprocess crashed
				llm.Close()
				if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
					return fmt.Errorf("llama runner exited: %v", llm.StatusWriter.LastErrMsg)
				}
				return fmt.Errorf("llama runner exited, you may not have enough available memory to run this model")
			}
			return fmt.Errorf("error reading llm response: %v", err)
		}

		if !retryNeeded {
			return nil // success
		}
	}

	// should never reach here ideally
	return fmt.Errorf("max retries exceeded")
}
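
// examplePredict is a hypothetical usage sketch: stream a completion, print
// chunks as they arrive, and report timing stats from the final event.
func examplePredict(ctx context.Context, llm *llama) error {
	return llm.Predict(ctx, PredictOpts{Prompt: "Why is the sky blue?"}, func(r PredictResult) {
		if r.Content != "" {
			fmt.Print(r.Content)
		}
		if r.Done {
			log.Printf("eval: %d tokens in %s", r.EvalCount, r.EvalDuration)
		}
	})
}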

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (llm *llama) Encode(ctx context.Context, prompt string) ([]int, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/tokenize", llm.Port)
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (llm *llama) Decode(ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/detokenize", llm.Port)
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal encode response: %w", err)
	}

	return decoded.Content, nil
}
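
// exampleRoundTrip is a hypothetical sketch: Encode and Decode are inverses
// backed by the runner's /tokenize and /detokenize endpoints.
func exampleRoundTrip(ctx context.Context, llm *llama) {
	tokens, err := llm.Encode(ctx, "Why is the sky blue?")
	if err != nil {
		log.Fatal(err)
	}
	text, err := llm.Decode(ctx, tokens)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d tokens -> %q", len(tokens), text)
}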

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (llm *llama) Embedding(ctx context.Context, input string) ([]float64, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/embedding", llm.Port)
	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("POST embedding: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

// Ping checks that the server subprocess is still running and responding to requests
func (llm *llama) Ping(ctx context.Context) error {
	resp, err := http.Head(fmt.Sprintf("http://127.0.0.1:%d", llm.Port))
	if err != nil {
		return fmt.Errorf("ping resp: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected ping status: %s", resp.Status)
	}
	return nil
}