package llm

import (
	"bufio"
	"bytes"
	"context"
	"embed"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"math/rand"
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/format"
)

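// jsonGrammar is a GBNF grammar (llama.cpp's grammar format) describing valid
// JSON. When a request asks for format "json", Predict attaches it to the
// completion request as the "grammar" field, constraining the runner's
// sampling to syntactically valid JSON output.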
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

//go:embed llama.cpp/*/build/*/bin/*
var llamaCppEmbed embed.FS

// ModelRunner describes a llama.cpp runner binary.
type ModelRunner struct {
	Path        string // path to the model runner executable
	Accelerated bool   // whether the runner is GPU-accelerated
}

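// chooseRunners extracts the embedded llama.cpp runner binaries for
// runnerType into workDir and returns the candidates in priority order.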
func chooseRunners(workDir, runnerType string) []ModelRunner {
	buildPath := path.Join("llama.cpp", runnerType, "build")
	var runners []ModelRunner

	// set the runners based on the OS
	// IMPORTANT: the order of the runners in the array is the priority order
	switch runtime.GOOS {
	case "darwin":
		if runtime.GOARCH == "arm64" {
			runners = []ModelRunner{{Path: path.Join(buildPath, "metal", "bin", "ollama-runner")}}
		} else {
			runners = []ModelRunner{{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")}}
		}
	case "linux":
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cuda", "bin", "ollama-runner"), Accelerated: true},
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	case "windows":
		// TODO: select windows GPU runner here when available
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "Release", "ollama-runner.exe")},
		}
	default:
		log.Printf("unknown OS, running on CPU: %s", runtime.GOOS)
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	}

	runnerAvailable := false // if no runner files are found in the embed, this flag will cause a fast fail
	for _, r := range runners {
		// find all the files in the runner's bin directory
		files, err := fs.Glob(llamaCppEmbed, path.Join(path.Dir(r.Path), "*"))
		if err != nil {
			// this is expected; ollama may be compiled without all runners packed in
			log.Printf("%s runner not found: %v", r.Path, err)
			continue
		}

		for _, f := range files {
			runnerAvailable = true

			srcFile, err := llamaCppEmbed.Open(f)
			if err != nil {
				log.Fatalf("read llama runner %s: %v", f, err)
			}
			defer srcFile.Close()

		// create the directory in case it does not exist; filepath.Dir() converts the file path to the OS's format
			destPath := filepath.Join(workDir, filepath.Dir(f))
			if err := os.MkdirAll(destPath, 0o755); err != nil {
				log.Fatalf("create runner temp dir %s: %v", filepath.Dir(f), err)
			}

		// build the path to the destination file; filepath.Base() extracts the file name from the embedded path
			destFile := filepath.Join(destPath, filepath.Base(f))

			_, err = os.Stat(destFile)
			switch {
			case errors.Is(err, os.ErrNotExist):
				destFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
				if err != nil {
					log.Fatalf("write llama runner %s: %v", f, err)
				}
				defer destFile.Close()

				if _, err := io.Copy(destFile, srcFile); err != nil {
					log.Fatalf("copy llama runner %s: %v", f, err)
				}
			case err != nil:
				log.Fatalf("stat llama runner %s: %v", f, err)
			}
		}
	}
	if !runnerAvailable {
		log.Fatalf("%s runner not found", runnerType)
	}

	// return the runners to try in priority order
	localRunnersByPriority := []ModelRunner{}
	for _, r := range runners {
		// clean the ModelRunner paths so that they match the OS we are running on
		localRunnersByPriority = append(localRunnersByPriority, ModelRunner{
			Path:        filepath.Clean(path.Join(workDir, r.Path)),
			Accelerated: r.Accelerated,
		})
	}

	return localRunnersByPriority
}

type llamaModel struct {
	hyperparameters llamaHyperparameters
}

func (llm *llamaModel) ModelFamily() string {
	return "llama"
}

func llamaModelType(numLayer uint32) string {
	switch numLayer {
	case 26:
		return "3B"
	case 32:
		return "7B"
	case 40:
		return "13B"
	case 48:
		return "34B"
	case 60:
		return "30B"
	case 80:
		return "65B"
	default:
		return "unknown"
	}
}

func (llm *llamaModel) ModelType() string {
	return llamaModelType(llm.hyperparameters.NumLayer)
}

func (llm *llamaModel) FileType() string {
	return fileType(llm.hyperparameters.FileType)
}

func (llm *llamaModel) NumLayers() int64 {
	return int64(llm.hyperparameters.NumLayer)
}

type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType uint32
}

// Running tracks the state of a launched llama runner subprocess.
type Running struct {
	Port          int
	Cmd           *exec.Cmd
	Cancel        context.CancelFunc
	exitOnce      sync.Once
	exitCh        chan error // channel to receive the exit status of the subprocess
	*StatusWriter            // captures error messages from the llama runner process
}

type llama struct {
	api.Options
	Running
}

var (
	errNvidiaSMI     = errors.New("warning: gpu support may not be enabled, check that you have installed GPU drivers: nvidia-smi command failed")
	errAvailableVRAM = errors.New("not enough VRAM available, falling back to CPU only")
)

// CheckVRAM returns the free VRAM in bytes on Linux machines with NVIDIA GPUs
func CheckVRAM() (int64, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNvidiaSMI
	}

	var freeMiB int64
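	// with --format=csv,noheader,nounits, nvidia-smi prints one free-memory
	// value in MiB per GPU, one per line; sum them across all GPUs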
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "[Insufficient Permissions]") {
			return 0, fmt.Errorf("GPU support may not enabled, check you have installed GPU drivers and have the necessary permissions to run nvidia-smi")
		}

		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}

		freeMiB += vram
	}

	freeBytes := freeMiB * 1024 * 1024
	if freeBytes < 2*format.GigaByte {
		log.Printf("less than 2 GB VRAM available")
		return 0, errAvailableVRAM
	}

	return freeBytes, nil
}

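// NumGPU returns the number of model layers to offload to the GPU: the
// caller's explicit setting when provided, an estimate based on free VRAM on
// Linux, or 1 otherwise (enabling Metal on macOS).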
func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
	if opts.NumGPU != -1 {
		return opts.NumGPU
	}
	if runtime.GOOS == "linux" {
		freeBytes, err := CheckVRAM()
		if err != nil {
			if !errors.Is(err, errNvidiaSMI) {
				log.Print(err.Error())
			}
			// nvidia driver not installed or no nvidia GPU found
			return 0
		}

		/*
		 Calculate bytes per layer: this will roughly be the size of the model file divided by the number of layers.
		 We can store the model weights and the kv cache in VRAM;
		 to account for kv cache VRAM storage, add two additional layers to the number of layers retrieved from the model file.
		*/
		bytesPerLayer := fileSizeBytes / numLayer

		// 75% of the absolute maximum number of layers that fit in available VRAM; offloading too many layers to the GPU can cause OOM errors
		layers := int(freeBytes/bytesPerLayer) * 3 / 4
		log.Printf("%d MB VRAM available, loading up to %d GPU layers", freeBytes/(1024*1024), layers)

		return layers
	}
	// default to enable metal on macOS
	return 1
}
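
// The sketch below works through the arithmetic NumGPU performs; the figures
// are illustrative assumptions (roughly a 13B q4_0 model), not measurements.
func exampleNumGPULayers() int {
	var (
		fileSizeBytes int64 = 7 << 30 // assumed model file size: 7 GiB
		numLayer      int64 = 40      // assumed layer count for a 13B model
		freeBytes     int64 = 8 << 30 // assumed free VRAM: 8 GiB
	)
	bytesPerLayer := fileSizeBytes / numLayer   // ~179 MiB per layer
	return int(freeBytes/bytesPerLayer) * 3 / 4 // floor(45.7) = 45; 45 * 3 / 4 = 33
}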

// StatusWriter is a writer that captures error messages from the llama runner process
type StatusWriter struct {
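	// ErrCh is buffered (capacity 1, see NewStatusWriter) so capturing the
	// first error never blocks the runner's output stream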
	ErrCh      chan error
	LastErrMsg string
}

func NewStatusWriter() *StatusWriter {
	return &StatusWriter{
		ErrCh: make(chan error, 1),
	}
}

func (w *StatusWriter) Write(b []byte) (int, error) {
	var errMsg string
	if _, after, ok := bytes.Cut(b, []byte("error:")); ok {
		errMsg = string(bytes.TrimSpace(after))
	} else if _, after, ok := bytes.Cut(b, []byte("CUDA error")); ok {
		errMsg = string(bytes.TrimSpace(after))
	}

	if errMsg != "" {
		w.LastErrMsg = errMsg
		w.ErrCh <- fmt.Errorf("llama runner: %s", errMsg)
	}

	return os.Stderr.Write(b)
}
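
// A quick sketch of how Write classifies runner output; both log lines below
// are fabricated examples rather than real llama.cpp output.
func exampleStatusWriter() {
	w := NewStatusWriter()
	w.Write([]byte("llama.cpp: loading model")) // passed through to stderr only
	w.Write([]byte("error: out of memory"))     // also captured on ErrCh
	select {
	case err := <-w.ErrCh:
		log.Printf("captured: %v", err) // "llama runner: out of memory"
	default:
	}
}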

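// newLlama starts the llama.cpp server subprocess for model, trying each
// runner in priority order until one starts and responds.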
func newLlama(model string, adapters []string, runners []ModelRunner, numLayers int64, opts api.Options) (*llama, error) {
	fileInfo, err := os.Stat(model)
	if err != nil {
		return nil, err
	}

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	numGPU := NumGPU(numLayers, fileInfo.Size(), opts)
	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--n-gpu-layers", fmt.Sprintf("%d", numGPU),
		"--embedding",
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if opts.RopeFrequencyBase > 0 {
		params = append(params, "--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase))
	}

	if opts.RopeFrequencyScale > 0 {
		params = append(params, "--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale))
	}

	if opts.NumGQA > 0 {
		params = append(params, "--gqa", fmt.Sprintf("%d", opts.NumGQA))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}
	if opts.UseMLock {
		params = append(params, "--mlock")
	}
	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}
	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	var runnerErr error

	// start the llama.cpp server with a retry in case the port is already in use
	for _, runner := range runners {
		if runner.Accelerated && numGPU == 0 {
			log.Printf("skipping accelerated runner because num_gpu=0")
			continue
		}

		if _, err := os.Stat(runner.Path); err != nil {
			log.Printf("llama runner not found: %v", err)
			continue
		}

		port := rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		ctx, cancel := context.WithCancel(context.Background())
		cmd := exec.CommandContext(
			ctx,
			runner.Path,
			append(params, "--port", strconv.Itoa(port))...,
		)

		// make sure the runner's own directory is on LD_LIBRARY_PATH so it can
		// locate any shared libraries extracted alongside it
		var libraryPaths []string
		if libraryPath, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok {
			libraryPaths = append(libraryPaths, libraryPath)
		}

		libraryPaths = append(libraryPaths, filepath.Dir(runner.Path))

		cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", strings.Join(libraryPaths, ":")))
		cmd.Stdout = os.Stderr
		statusWriter := NewStatusWriter()
		cmd.Stderr = statusWriter

		llm := &llama{Options: opts, Running: Running{Port: port, Cmd: cmd, Cancel: cancel, exitCh: make(chan error)}}

		log.Print("starting llama runner")
		if err := llm.Cmd.Start(); err != nil {
			log.Printf("error starting the external llama runner: %v", err)
			continue
		}

		// monitor the llama runner process and signal when it exits
		go func() {
			err := llm.Cmd.Wait()
			// default to printing the exit message of the command process; it will probably just say 'exit status 1'
			errMsg := err.Error()
			// try to set a better error message if llama runner logs captured an error
			if statusWriter.LastErrMsg != "" {
				errMsg = statusWriter.LastErrMsg
			}
			log.Println(errMsg)
			// llm.Cmd.Wait() can only be called once; use this exit channel to signal that the process has exited
			llm.exitOnce.Do(func() {
				close(llm.exitCh)
			})
		}()

		if err := waitForServer(llm); err != nil {
			log.Printf("error starting llama runner: %v", err)
			llm.Close()

			// default the runnerErr to the error returned by the most recent llama runner process
			runnerErr = err

			// capture the error directly from the runner process, if any
			select {
			case runnerErr = <-statusWriter.ErrCh:
			default:
				// the runner process probably timed out
			}

			// try again
			continue
		}

		// server started successfully
		return llm, nil
	}

	if runnerErr != nil {
		// this is the error returned from the llama runner process that failed most recently
		return nil, runnerErr
	}

	return nil, fmt.Errorf("failed to start a llama runner")
}

func waitForServer(llm *llama) error {
	start := time.Now()
	expiresAt := time.Now().Add(3 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	log.Print("waiting for llama runner to start responding")
	for {
		select {
		case <-llm.exitCh:
			// failed to start subprocess
			return fmt.Errorf("llama runner process has terminated")
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				return fmt.Errorf("timed out waiting for llama runner to start")
			}

			if err := llm.Ping(context.Background()); err == nil {
				// success
				log.Printf("llama runner started in %f seconds", time.Since(start).Seconds())
				return nil
			}
		}
	}
}

func (llm *llama) Close() {
	// signal the sub-process to terminate
	llm.Cancel()

	// wait for the command to exit to prevent race conditions with the next run
	<-llm.exitCh

	if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
		log.Printf("llama runner stopped with error: %v", llm.StatusWriter.LastErrMsg)
	} else {
		log.Print("llama runner stopped successfully")
	}
}

func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

// prediction mirrors the JSON events streamed by the llama.cpp server's /completion endpoint.
type prediction struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

const maxBufferSize = 512 * format.KiloByte

func (llm *llama) Predict(ctx context.Context, prevContext []int, prompt string, format string, fn func(api.GenerateResponse)) error {
	prevConvo, err := llm.Decode(ctx, prevContext)
	if err != nil {
		return err
	}

	// remove a leading space from prevConvo if present
	prevConvo = strings.TrimPrefix(prevConvo, " ")

	var nextContext strings.Builder
	nextContext.WriteString(prevConvo)
	nextContext.WriteString(prompt)

	request := map[string]any{
		"prompt":            nextContext.String(),
		"stream":            true,
		"n_predict":         llm.NumPredict,
		"n_keep":            llm.NumKeep,
		"main_gpu":          llm.MainGPU,
		"temperature":       llm.Temperature,
		"top_k":             llm.TopK,
		"top_p":             llm.TopP,
		"tfs_z":             llm.TFSZ,
		"typical_p":         llm.TypicalP,
		"repeat_last_n":     llm.RepeatLastN,
		"repeat_penalty":    llm.RepeatPenalty,
		"presence_penalty":  llm.PresencePenalty,
		"frequency_penalty": llm.FrequencyPenalty,
		"mirostat":          llm.Mirostat,
		"mirostat_tau":      llm.MirostatTau,
		"mirostat_eta":      llm.MirostatEta,
		"penalize_nl":       llm.PenalizeNewline,
		"seed":              llm.Seed,
		"stop":              llm.Stop,
	}

	if format == "json" {
		request["grammar"] = jsonGrammar
	}

	// handle JSON marshaling with special characters left unescaped
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(resp.Body)
	// increase the buffer size to avoid running out of space
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)
	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

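			// the completion endpoint streams server-sent events; each
			// payload line carries a "data: " prefix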
			if evt, ok := bytes.CutPrefix(line, []byte("data: ")); ok {
				var p prediction
				if err := json.Unmarshal(evt, &p); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				if p.Content != "" {
					fn(api.GenerateResponse{Response: p.Content})
					nextContext.WriteString(p.Content)
				}

				if p.Stop {
					embd, err := llm.Encode(ctx, nextContext.String())
					if err != nil {
						return fmt.Errorf("encoding context: %v", err)
					}

					fn(api.GenerateResponse{
						Done:               true,
						Context:            embd,
						PromptEvalCount:    p.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
						EvalCount:          p.Timings.PredictedN,
						EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
					})

					return nil
				}
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			// this means the llama runner subprocess crashed
			llm.Close()
			if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
				return fmt.Errorf("llama runner exited: %v", llm.StatusWriter.LastErrMsg)
			}
			return fmt.Errorf("llama runner exited, you may not have enough available memory to run this model")
		}
		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (llm *llama) Encode(ctx context.Context, prompt string) ([]int, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/tokenize", llm.Port)
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (llm *llama) Decode(ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/detokenize", llm.Port)
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal encode response: %w", err)
	}

	return decoded.Content, nil
}
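
// A round-trip sketch: Predict replays prior context by decoding it back to
// text, so Encode and Decode must invert each other up to tokenizer
// normalization. The llm value here is assumed to be a started runner.
func exampleRoundTrip(ctx context.Context, llm *llama) (string, error) {
	tokens, err := llm.Encode(ctx, "Why is the sky blue?")
	if err != nil {
		return "", err
	}
	return llm.Decode(ctx, tokens)
}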

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (llm *llama) Embedding(ctx context.Context, input string) ([]float64, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/embedding", llm.Port)
	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("POST embedding: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

// Ping checks that the server subprocess is still running and responding to requests
func (llm *llama) Ping(ctx context.Context) error {
	resp, err := http.Head(fmt.Sprintf("http://127.0.0.1:%d", llm.Port))
	if err != nil {
		return fmt.Errorf("ping resp: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected ping status: %s", resp.Status)
	}
	return nil
}