package llm

import (
	"bufio"
	"bytes"
	"context"
	"embed"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"math/rand"
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/format"
)

//go:embed llama.cpp/*/build/*/bin/*
var llamaCppEmbed embed.FS

type ModelRunner struct {
	Path        string // path to the model runner executable
	Accelerated bool   // whether this runner requires GPU support
}

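// chooseRunners extracts the embedded runner binaries for runnerType into
// workDir and returns the candidates to try, in priority order.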
func chooseRunners(workDir, runnerType string) []ModelRunner {
	buildPath := path.Join("llama.cpp", runnerType, "build")
	var runners []ModelRunner

	// set the runners based on the OS
	// IMPORTANT: the order of the runners in the array is the priority order
	switch runtime.GOOS {
	case "darwin":
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "metal", "bin", "ollama-runner")},
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	case "linux":
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cuda", "bin", "ollama-runner"), Accelerated: true},
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	case "windows":
		// TODO: select windows GPU runner here when available
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "Release", "ollama-runner.exe")},
		}
	default:
		log.Printf("unknown OS, running on CPU: %s", runtime.GOOS)
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	}

	runnerAvailable := false // if no runner files are found in the embed, fail fast below
	for _, r := range runners {
		// find all the files in the runner's bin directory
		files, err := fs.Glob(llamaCppEmbed, path.Join(path.Dir(r.Path), "*"))
		if err != nil {
			// this is expected, ollama may be compiled without all runners packed in
			log.Printf("%s runner not found: %v", r.Path, err)
			continue
		}

		for _, f := range files {
			runnerAvailable = true

			srcFile, err := llamaCppEmbed.Open(f)
			if err != nil {
				log.Fatalf("read llama runner %s: %v", f, err)
			}
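			// note: these defers accumulate until chooseRunners returns, which is
			// acceptable for the small, fixed set of embedded runner files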
			defer srcFile.Close()

			// create the directory in case it does not exist; filepath.Dir converts the embedded path to the OS's format
			destPath := filepath.Join(workDir, filepath.Dir(f))
			if err := os.MkdirAll(destPath, 0o755); err != nil {
				log.Fatalf("create runner temp dir %s: %v", filepath.Dir(f), err)
			}

			// build the path to the destination file; filepath.Base converts the embedded path to the OS's format
			destFile := filepath.Join(destPath, filepath.Base(f))

			_, err = os.Stat(destFile)
			switch {
			case errors.Is(err, os.ErrNotExist):
				destFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
				if err != nil {
					log.Fatalf("write llama runner %s: %v", f, err)
				}
				defer destFile.Close()

				if _, err := io.Copy(destFile, srcFile); err != nil {
					log.Fatalf("copy llama runner %s: %v", f, err)
				}
			case err != nil:
				log.Fatalf("stat llama runner %s: %v", f, err)
			}
		}
	}
	if !runnerAvailable {
		log.Fatalf("%s runner not found", runnerType)
	}

	// return the runners to try in priority order
	localRunnersByPriority := []ModelRunner{}
	for _, r := range runners {
		// clean the ModelRunner paths so that they match the OS we are running on
		localRunnersByPriority = append(localRunnersByPriority, ModelRunner{
			Path:        filepath.Clean(path.Join(workDir, r.Path)),
			Accelerated: r.Accelerated,
		})
	}

	return localRunnersByPriority
}

type llamaModel struct {
	hyperparameters llamaHyperparameters
}

func (llm *llamaModel) ModelFamily() string {
	return "llama"
}

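// llamaModelType maps a LLaMA layer count to its parameter-size label.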
func llamaModelType(numLayer uint32) string {
	switch numLayer {
	case 26:
		return "3B"
	case 32:
		return "7B"
	case 40:
		return "13B"
	case 48:
		return "34B"
	case 60:
		return "30B"
	case 80:
		return "65B"
	default:
		return "unknown"
	}
}

func (llm *llamaModel) ModelType() string {
	return llamaModelType(llm.hyperparameters.NumLayer)
}

func (llm *llamaModel) FileType() string {
	return fileType(llm.hyperparameters.FileType)
}

func (llm *llamaModel) NumLayers() int64 {
	return int64(llm.hyperparameters.NumLayer)
}

type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType uint32
}

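// Running tracks the llama runner subprocess and its exit state.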
type Running struct {
	Port     int
	Cmd      *exec.Cmd
	Cancel   context.CancelFunc
	exitOnce sync.Once
	exitCh   chan error // channel to receive the exit status of the subprocess
	exitErr  error      // error returned by the subprocess
}

type llama struct {
	api.Options
	Running
}

var errNoGPU = errors.New("nvidia-smi command failed")

// CheckVRAM returns the available VRAM in MiB on Linux machines with NVIDIA GPUs
func CheckVRAM() (int64, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNoGPU
	}

	var free int64
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}

		free += vram
	}

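	// free is in MiB; require roughly 2 GB free before offloading to the GPU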
	if free*1024*1024 < 2*1000*1000*1000 {
		log.Printf("less than 2 GB VRAM available, falling back to CPU only")
		free = 0
	}

	return free, nil
}

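// NumGPU decides how many model layers to offload to the GPU. An explicit
// opts.NumGPU is honored as-is; -1 means auto-detect: on Linux the count is
// derived from free VRAM, and other platforms default to 1, which enables
// Metal on macOS.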
func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
	if opts.NumGPU != -1 {
		return opts.NumGPU
	}
	if runtime.GOOS == "linux" {
		vramMib, err := CheckVRAM()
		if err != nil {
			if !errors.Is(err, errNoGPU) {
				log.Print(err.Error())
			}
			// nvidia driver not installed or no nvidia GPU found
			return 0
		}

		freeVramBytes := vramMib * 1024 * 1024 // 1 MiB = 1024^2 bytes

		// Calculate bytes per layer
		// TODO: this is a rough heuristic, better would be to calculate this based on number of layers and context size
		bytesPerLayer := fileSizeBytes / numLayer

		// max number of layers we can fit in VRAM, subtract 8% to prevent consuming all available VRAM and running out of memory
		layers := int(freeVramBytes/bytesPerLayer) * 92 / 100
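		// e.g. a 4 GiB, 32-layer model needs ~128 MiB per layer, so 8192 MiB of
		// free VRAM offloads 8192/128 * 92/100 = 58 layers (llama.cpp caps this
		// at the model's actual layer count)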
		log.Printf("%d MiB VRAM available, loading up to %d GPU layers", vramMib, layers)

		return layers
	}
	// default to enable metal on macOS
	return 1
}

// StatusWriter is a writer that captures error messages from the llama runner process
type StatusWriter struct {
	ErrCh chan error
}

func NewStatusWriter() *StatusWriter {
	return &StatusWriter{
		ErrCh: make(chan error, 1),
	}
}

func (w *StatusWriter) Write(b []byte) (int, error) {
	if _, after, ok := bytes.Cut(b, []byte("error:")); ok {
		// non-blocking send: keep the first error and drop any later ones so a
		// full channel cannot stall the subprocess's stderr
		select {
		case w.ErrCh <- fmt.Errorf("llama runner: %s", bytes.TrimSpace(after)):
		default:
		}
	}
	return os.Stderr.Write(b)
}

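// newLlama starts a llama.cpp runner subprocess serving model on a random
// port, trying each candidate runner in priority order, and returns a client
// bound to the first one that starts successfully.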
func newLlama(model string, adapters []string, runners []ModelRunner, numLayers int64, opts api.Options) (*llama, error) {
	fileInfo, err := os.Stat(model)
	if err != nil {
		return nil, err
	}

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	numGPU := NumGPU(numLayers, fileInfo.Size(), opts)
	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase),
		"--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	if numGPU > 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", numGPU))
	}

	if opts.NumGQA > 0 {
		params = append(params, "--gqa", fmt.Sprintf("%d", opts.NumGQA))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}
	if opts.UseMLock {
		params = append(params, "--mlock")
	}
	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}
	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	var runnerErr error

	// start the llama.cpp server with a retry in case the port is already in use
	for _, runner := range runners {
		if runner.Accelerated && numGPU == 0 {
			log.Printf("skipping accelerated runner because num_gpu=0")
			continue
		}

		if _, err := os.Stat(runner.Path); err != nil {
			log.Printf("llama runner not found: %v", err)
			continue
		}

		port := rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		ctx, cancel := context.WithCancel(context.Background())
		cmd := exec.CommandContext(
			ctx,
			runner.Path,
			append(params, "--port", strconv.Itoa(port))...,
		)
		cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", filepath.Dir(runner.Path)))
		cmd.Stdout = os.Stderr
		statusWriter := NewStatusWriter()
		cmd.Stderr = statusWriter

		llm := &llama{Options: opts, Running: Running{Port: port, Cmd: cmd, Cancel: cancel, exitCh: make(chan error)}}

		log.Print("starting llama runner")
		if err := llm.Cmd.Start(); err != nil {
			log.Printf("error starting the external llama runner: %v", err)
			continue
		}

		// monitor the llama runner process and signal when it exits
		go func() {
			err := llm.Cmd.Wait()
			llm.exitErr = err
			// llm.Cmd.Wait() can only be called once, use this exit channel to signal that the process has exited
			llm.exitOnce.Do(func() {
				close(llm.exitCh)
			})
		}()

		if err := waitForServer(llm); err != nil {
			log.Printf("error starting llama runner: %v", err)
			llm.Close()

			// default the runnerErr to the error returned by the most recent llama runner process
			runnerErr = err

			// capture the error directly from the runner process, if any
			select {
			case runnerErr = <-statusWriter.ErrCh:
			default:
				// the runner process probably timed out
			}

			// try again
			continue
		}

		// server started successfully
		return llm, nil
	}

	if runnerErr != nil {
		// this is the error returned from the llama runner process that failed most recently
		return nil, runnerErr
	}

	return nil, fmt.Errorf("failed to start a llama runner")
}

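// waitForServer blocks until the llama runner responds to a ping, the
// subprocess exits, or the startup timeout elapses.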
func waitForServer(llm *llama) error {
	start := time.Now()
	expiresAt := time.Now().Add(3 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	log.Print("waiting for llama runner to start responding")
	for {
		select {
		case <-llm.exitCh:
			// failed to start subprocess
			return fmt.Errorf("llama runner process has terminated")
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				return fmt.Errorf("timed out waiting for llama runner to start")
			}

			if err := llm.Ping(context.Background()); err == nil {
				// success
				log.Printf("llama runner started in %f seconds", time.Since(start).Seconds())
				return nil
			}
		}
	}
}

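// Close terminates the runner subprocess and waits for it to exit.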
func (llm *llama) Close() {
	// signal the sub-process to terminate
	llm.Cancel()

	// wait for the command to exit to prevent race conditions with the next run
	<-llm.exitCh
	err := llm.exitErr

	if err != nil {
		log.Printf("llama runner stopped with error: %v", err)
	} else {
		log.Print("llama runner stopped successfully")
	}
}

func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

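// GenerationSettings mirrors the generation settings object reported by the
// llama.cpp server.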
type GenerationSettings struct {
	FrequencyPenalty float64       `json:"frequency_penalty"`
	IgnoreEOS        bool          `json:"ignore_eos"`
	LogitBias        []interface{} `json:"logit_bias"`
	Mirostat         int           `json:"mirostat"`
	MirostatEta      float64       `json:"mirostat_eta"`
	MirostatTau      float64       `json:"mirostat_tau"`
	Model            string        `json:"model"`
	NCtx             int           `json:"n_ctx"`
	NKeep            int           `json:"n_keep"`
	NPredict         int           `json:"n_predict"`
	NProbs           int           `json:"n_probs"`
	PenalizeNl       bool          `json:"penalize_nl"`
	PresencePenalty  float64       `json:"presence_penalty"`
	RepeatLastN      int           `json:"repeat_last_n"`
	RepeatPenalty    float64       `json:"repeat_penalty"`
	Seed             uint32        `json:"seed"`
	Stop             []string      `json:"stop"`
	Stream           bool          `json:"stream"`
	Temp             float64       `json:"temp"`
	TfsZ             float64       `json:"tfs_z"`
	TopK             int           `json:"top_k"`
	TopP             float64       `json:"top_p"`
	TypicalP         float64       `json:"typical_p"`
}

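// Timings reports token counts and wall-clock timings measured by the
// llama.cpp server.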
type Timings struct {
	PredictedN  int     `json:"predicted_n"`
	PredictedMS float64 `json:"predicted_ms"`
	PromptN     int     `json:"prompt_n"`
	PromptMS    float64 `json:"prompt_ms"`
}

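// Prediction is a single event in the llama.cpp /completion response stream.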
type Prediction struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings `json:"timings"`
}

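// PredictRequest is the request body sent to the llama.cpp /completion
// endpoint.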
type PredictRequest struct {
	Prompt           string   `json:"prompt"`
	Stream           bool     `json:"stream"`
	NPredict         int      `json:"n_predict"`
	NKeep            int      `json:"n_keep"`
	Temperature      float32  `json:"temperature"`
	TopK             int      `json:"top_k"`
	TopP             float32  `json:"top_p"`
	TfsZ             float32  `json:"tfs_z"`
	TypicalP         float32  `json:"typical_p"`
	RepeatLastN      int      `json:"repeat_last_n"`
	RepeatPenalty    float32  `json:"repeat_penalty"`
	PresencePenalty  float32  `json:"presence_penalty"`
	FrequencyPenalty float32  `json:"frequency_penalty"`
	Mirostat         int      `json:"mirostat"`
	MirostatTau      float32  `json:"mirostat_tau"`
	MirostatEta      float32  `json:"mirostat_eta"`
	PenalizeNl       bool     `json:"penalize_nl"`
	Seed             int      `json:"seed"`
	Stop             []string `json:"stop,omitempty"`
}

const maxBufferSize = 512 * format.KiloByte

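// Predict appends prompt to the decoded prevContext, streams a completion
// from the llama.cpp server, and invokes fn for each generated chunk.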
func (llm *llama) Predict(ctx context.Context, prevContext []int, prompt string, fn func(api.GenerateResponse)) error {
	prevConvo, err := llm.Decode(ctx, prevContext)
	if err != nil {
		return err
	}

	var nextContext strings.Builder
	nextContext.WriteString(prevConvo)
	nextContext.WriteString(prompt)

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port)
	predReq := PredictRequest{
		Prompt:           nextContext.String(),
		Stream:           true,
		NPredict:         llm.NumPredict,
		NKeep:            llm.NumKeep,
		Temperature:      llm.Temperature,
		TopK:             llm.TopK,
		TopP:             llm.TopP,
		TfsZ:             llm.TFSZ,
		TypicalP:         llm.TypicalP,
		RepeatLastN:      llm.RepeatLastN,
		RepeatPenalty:    llm.RepeatPenalty,
		PresencePenalty:  llm.PresencePenalty,
		FrequencyPenalty: llm.FrequencyPenalty,
		Mirostat:         llm.Mirostat,
		MirostatTau:      llm.MirostatTau,
		MirostatEta:      llm.MirostatEta,
		PenalizeNl:       llm.PenalizeNewline,
		Seed:             llm.Seed,
		Stop:             llm.Stop,
	}

	data, err := json.Marshal(predReq)
	if err != nil {
		return fmt.Errorf("error marshaling data: %v", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(resp.Body)
	// increase the buffer size to avoid running out of space
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)
	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Text()
			if line == "" {
				continue
			}

			// Read data from the server-side event stream
			if strings.HasPrefix(line, "data: ") {
				evt := line[6:]
				var p Prediction
				if err := json.Unmarshal([]byte(evt), &p); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				if p.Content != "" {
					fn(api.GenerateResponse{Response: p.Content})
					nextContext.WriteString(p.Content)
				}

				if p.Stop {
					embd, err := llm.Encode(ctx, nextContext.String())
					if err != nil {
						return fmt.Errorf("encoding context: %v", err)
					}

					fn(api.GenerateResponse{
						Done:               true,
						Context:            embd,
						PromptEvalCount:    p.PromptN,
						PromptEvalDuration: parseDurationMs(p.PromptMS),
						EvalCount:          p.PredictedN,
						EvalDuration:       parseDurationMs(p.PredictedMS),
					})

					return nil
				}
			}
		}
	}

	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

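// Encode tokenizes prompt using the llama.cpp server's /tokenize endpoint.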
func (llm *llama) Encode(ctx context.Context, prompt string) ([]int, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/tokenize", llm.Port)
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

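// Decode converts tokens back into text using the llama.cpp server's
// /detokenize endpoint.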
func (llm *llama) Decode(ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/detokenize", llm.Port)
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal encode response: %w", err)
	}

	// the decoded content contains a leading whitespace; strip it
	decoded.Content, _ = strings.CutPrefix(decoded.Content, " ")

	return decoded.Content, nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

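// Embedding computes an embedding vector for input using the llama.cpp
// server's /embedding endpoint.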
func (llm *llama) Embedding(ctx context.Context, input string) ([]float64, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/embedding", llm.Port)
	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("POST embedding: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

// Ping checks that the server subprocess is still running and responding to requests
func (llm *llama) Ping(ctx context.Context) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, fmt.Sprintf("http://127.0.0.1:%d", llm.Port), nil)
	if err != nil {
		return fmt.Errorf("ping request: %w", err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("ping resp: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected ping status: %s", resp.Status)
	}
	return nil
}