llama.go
package llm

import (
	"bufio"
	"bytes"
	"context"
	"embed"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"math/rand"
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/jmorganca/ollama/api"
)

//go:embed llama.cpp/*/build/*/bin/*
var llamaCppEmbed embed.FS

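// cudaVersion returns the major CUDA version reported by nvcc (preferred) or
// nvidia-smi, or -1 if neither tool can determine it.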
func cudaVersion() int {
	// first try nvcc, it gives the most accurate version if available
	cmd := exec.Command("nvcc", "--version")
	output, err := cmd.CombinedOutput()
	if err == nil {
		// regex to match the CUDA version line in nvcc --version output
		re := regexp.MustCompile(`release (\d+\.\d+),`)
		matches := re.FindStringSubmatch(string(output))
		if len(matches) >= 2 {
			cudaVersion := matches[1]
			cudaVersionParts := strings.Split(cudaVersion, ".")
			cudaMajorVersion, err := strconv.Atoi(cudaVersionParts[0])
			if err == nil {
				return cudaMajorVersion
			}
		}
	}

	// fallback to nvidia-smi
	cmd = exec.Command("nvidia-smi")
	output, err = cmd.CombinedOutput()
	if err != nil {
		return -1
	}

	re := regexp.MustCompile(`CUDA Version: (\d+\.\d+)`)
	matches := re.FindStringSubmatch(string(output))
	if len(matches) < 2 {
		return -1
	}

	cudaVersion := matches[1]
	cudaVersionParts := strings.Split(cudaVersion, ".")
	cudaMajorVersion, err := strconv.Atoi(cudaVersionParts[0])
	if err != nil {
		return -1
	}
	return cudaMajorVersion
}

type ModelRunner struct {
	Path string // path to the model runner executable
}

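// chooseRunners extracts the embedded llama.cpp server binaries for the given
// runner type into a temporary directory and returns them in priority order.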
func chooseRunners(runnerType string) []ModelRunner {
	buildPath := path.Join("llama.cpp", runnerType, "build")
	var runners []string

	// set the runners based on the OS
	// IMPORTANT: the order of the runners in the array is the priority order
	switch runtime.GOOS {
	case "darwin":
		runners = []string{
			path.Join(buildPath, "metal", "bin", "server"),
			path.Join(buildPath, "cpu", "bin", "server"),
		}
	case "linux":
		cuda := cudaVersion()
		if cuda == 11 {
			// prioritize CUDA 11 runner
			runners = []string{
				path.Join(buildPath, "cuda-11", "bin", "server"),
				path.Join(buildPath, "cuda-12", "bin", "server"),
				path.Join(buildPath, "cpu", "bin", "server"),
			}
		} else {
			runners = []string{
				path.Join(buildPath, "cuda-12", "bin", "server"),
				path.Join(buildPath, "cuda-11", "bin", "server"),
				path.Join(buildPath, "cpu", "bin", "server"),
			}
		}
	case "windows":
		// TODO: select windows GPU runner here when available
		runners = []string{
			path.Join(buildPath, "cpu", "bin", "Release", "server.exe"),
		}
	default:
		log.Printf("unknown OS, running on CPU: %s", runtime.GOOS)
		runners = []string{
			path.Join(buildPath, "cpu", "bin", "server"),
		}
	}

	// copy the files locally to run the llama.cpp server
	tmpDir, err := os.MkdirTemp("", "llama-*")
	if err != nil {
		log.Fatalf("load llama runner: failed to create temp dir: %v", err)
	}
	runnerAvailable := false // if no runner files are found in the embed, this flag will cause a fast fail
	for _, r := range runners {
		// find all the files in the runner's bin directory
		files, err := fs.Glob(llamaCppEmbed, path.Join(path.Dir(r), "*")) // embed.FS paths always use forward slashes
		if err != nil {
			// this is expected, ollama may be compiled without all runners packed in
			log.Printf("%s runner not found: %v", r, err)
			continue
		}
		runnerAvailable = true

		for _, f := range files {
			srcFile, err := llamaCppEmbed.Open(f)
			if err != nil {
				log.Fatalf("read llama runner %s: %v", f, err)
			}
			defer srcFile.Close()

			// create the directory in case it does not exist
			destPath := filepath.Join(tmpDir, filepath.Dir(f))
			if err := os.MkdirAll(destPath, 0o755); err != nil {
				log.Fatalf("create runner temp dir %s: %v", filepath.Dir(f), err)
			}
			destFile, err := os.OpenFile(filepath.Join(destPath, filepath.Base(f)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
			if err != nil {
				log.Fatalf("write llama runner %s: %v", f, err)
			}
			defer destFile.Close()

			if _, err := io.Copy(destFile, srcFile); err != nil {
				log.Fatalf("copy llama runner %s: %v", f, err)
			}
		}
	}
	if !runnerAvailable {
		log.Fatalf("%s runner not found", runnerType)
	}

	// return the runners to try in priority order
	localRunnersByPriority := []ModelRunner{}
	for _, r := range runners {
		localRunnersByPriority = append(localRunnersByPriority, ModelRunner{Path: filepath.Join(tmpDir, r)})
	}

	return localRunnersByPriority
}

type llamaModel struct {
	hyperparameters llamaHyperparameters
}

func (llm *llamaModel) ModelFamily() string {
	return "llama"
}

func llamaModelType(numLayer uint32) string {
	switch numLayer {
	case 26:
		return "3B"
	case 32:
		return "7B"
	case 40:
		return "13B"
	case 48:
		return "34B"
	case 60:
		return "30B"
	case 80:
		return "65B"
	default:
		return "Unknown"
	}
}

func (llm *llamaModel) ModelType() string {
	return llamaModelType(llm.hyperparameters.NumLayer)
}

func (llm *llamaModel) FileType() string {
	return fileType(llm.hyperparameters.FileType)
}

type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType uint32
}

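// Running describes a launched llama.cpp server subprocess.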
type Running struct {
	Port   int
	Cmd    *exec.Cmd
	Cancel context.CancelFunc
}

type llama struct {
	api.Options
	Running
}

var errNoGPU = errors.New("nvidia-smi command failed")

// CheckVRAM returns the total VRAM in MiB, summed across all GPUs, on Linux machines with NVIDIA GPUs
func CheckVRAM() (int, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.total", "--format=csv,noheader,nounits")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNoGPU
	}

	var total int
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		vram, err := strconv.Atoi(line)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}

		total += vram
	}

	return total, nil
}

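// NumGPU returns the number of model layers to offload to the GPU: an
// explicit opts.NumGPU if set, otherwise a VRAM-based heuristic on Linux.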
func NumGPU(opts api.Options) int {
	if opts.NumGPU != -1 {
		return opts.NumGPU
	}
	n := 1 // default to enable metal on macOS
	if runtime.GOOS == "linux" {
		vram, err := CheckVRAM()
		if err != nil {
			if !errors.Is(err, errNoGPU) {
				log.Print(err.Error())
			}
			// nvidia driver not installed or no nvidia GPU found
			return 0
		}
		// TODO: this is a very rough heuristic, better would be to calculate this based on number of layers and context size
		switch {
		case vram < 500:
			log.Printf("WARNING: Low VRAM detected, disabling GPU")
			n = 0
		case vram < 1000:
			n = 4
		case vram < 2000:
			n = 8
		case vram < 4000:
			n = 12
		case vram < 8000:
			n = 16
		case vram < 12000:
			n = 24
		case vram < 16000:
			n = 32
		default:
			n = 48
		}
		log.Printf("%d MB VRAM available, loading %d GPU layers", vram, n)
	}
	return n
}

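// newLlama starts a llama.cpp server for the given model, trying each runner
// in priority order until one starts and responds.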
func newLlama(model string, adapters []string, runners []ModelRunner, opts api.Options) (*llama, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase),
		"--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--n-gpu-layers", fmt.Sprintf("%d", NumGPU(opts)),
		"--embedding",
	}

	if opts.NumGQA > 0 {
		params = append(params, "--gqa", fmt.Sprintf("%d", opts.NumGQA))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}
	if opts.UseMLock {
		params = append(params, "--mlock")
	}
	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}
	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	// start the llama.cpp server with a retry in case the port is already in use
	for _, runner := range runners {
		if _, err := os.Stat(runner.Path); err != nil {
			log.Printf("llama runner not found: %v", err)
			continue
		}

		port := rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		ctx, cancel := context.WithCancel(context.Background())
		cmd := exec.CommandContext(
			ctx,
			runner.Path,
			append(params, "--port", strconv.Itoa(port))...,
		)
		cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", filepath.Dir(runner.Path)))
		cmd.Stdout = os.Stderr
		cmd.Stderr = os.Stderr

		llm := &llama{Options: opts, Running: Running{Port: port, Cmd: cmd, Cancel: cancel}}

		log.Print("starting llama runner")
		if err := llm.Cmd.Start(); err != nil {
			log.Printf("error starting the external llama runner: %v", err)
			continue
		}

		// monitor the command, it is blocking, so if it exits we need to capture that
		go func() {
			err := llm.Cmd.Wait() // this will block until the command exits
			if err != nil {
				log.Printf("llama runner exited with error: %v", err)
			} else {
				log.Printf("llama runner exited")
			}
		}()

		if err := waitForServer(llm); err != nil {
			log.Printf("error starting llama runner: %v", err)
			llm.Close()
			// try again
			continue
		}

		// server started successfully
		return llm, nil
	}

	return nil, fmt.Errorf("failed to start a llama runner")
}

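// waitForServer polls the runner until it answers a ping, giving up after two
// minutes or when the subprocess exits.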
func waitForServer(llm *llama) error {
	// wait for the server to start responding
	start := time.Now()
	expiresAt := time.Now().Add(2 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	log.Print("waiting for llama runner to start responding")
	for range ticker.C {
		if time.Now().After(expiresAt) {
			return fmt.Errorf("llama runner did not start within alloted time, retrying")
		}

		// check if the server process has terminated
		if llm.Cmd.ProcessState != nil && llm.Cmd.ProcessState.Exited() {
			return fmt.Errorf("llama runner process has terminated")
		}

		if err := llm.Ping(context.Background()); err == nil {
			break
		}
	}

	log.Printf("llama runner started in %f seconds", time.Since(start).Seconds())
	return nil
}

func (llm *llama) Close() {
	llm.Cancel()
}

func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

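// GenerationSettings mirrors the generation settings reported by the
// llama.cpp server.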
type GenerationSettings struct {
	FrequencyPenalty float64       `json:"frequency_penalty"`
	IgnoreEOS        bool          `json:"ignore_eos"`
	LogitBias        []interface{} `json:"logit_bias"`
	Mirostat         int           `json:"mirostat"`
	MirostatEta      float64       `json:"mirostat_eta"`
	MirostatTau      float64       `json:"mirostat_tau"`
	Model            string        `json:"model"`
	NCtx             int           `json:"n_ctx"`
	NKeep            int           `json:"n_keep"`
	NPredict         int           `json:"n_predict"`
	NProbs           int           `json:"n_probs"`
	PenalizeNl       bool          `json:"penalize_nl"`
	PresencePenalty  float64       `json:"presence_penalty"`
	RepeatLastN      int           `json:"repeat_last_n"`
	RepeatPenalty    float64       `json:"repeat_penalty"`
	Seed             uint32        `json:"seed"`
	Stop             []string      `json:"stop"`
	Stream           bool          `json:"stream"`
	Temp             float64       `json:"temp"`
	TfsZ             float64       `json:"tfs_z"`
	TopK             int           `json:"top_k"`
	TopP             float64       `json:"top_p"`
	TypicalP         float64       `json:"typical_p"`
}

type Timings struct {
	PredictedN  int     `json:"predicted_n"`
	PredictedMS float64 `json:"predicted_ms"`
	PromptN     int     `json:"prompt_n"`
	PromptMS    float64 `json:"prompt_ms"`
}

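// Prediction is a single event in the llama.cpp server's /completion stream.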
type Prediction struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings `json:"timings"`
}

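// PredictRequest is the request body for the llama.cpp server's /completion
// endpoint.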
type PredictRequest struct {
	Stream           bool            `json:"stream"`
	NPredict         int             `json:"n_predict,omitempty"`
	TopK             int             `json:"top_k,omitempty"`
	TopP             float32         `json:"top_p,omitempty"`
	TfsZ             float32         `json:"tfs_z,omitempty"`
	TypicalP         float32         `json:"typical_p,omitempty"`
	RepeatLastN      int             `json:"repeat_last_n,omitempty"`
	Temperature      float32         `json:"temperature,omitempty"`
	RepeatPenalty    float32         `json:"repeat_penalty,omitempty"`
	PresencePenalty  float32         `json:"presence_penalty,omitempty"`
	FrequencyPenalty float32         `json:"frequency_penalty,omitempty"`
	Mirostat         int             `json:"mirostat,omitempty"`
	MirostatTau      float32         `json:"mirostat_tau,omitempty"`
	MirostatEta      float32         `json:"mirostat_eta,omitempty"`
	PenalizeNl       bool            `json:"penalize_nl,omitempty"`
	NKeep            int             `json:"n_keep,omitempty"`
	Seed             int             `json:"seed,omitempty"`
	Prompt           string          `json:"prompt,omitempty"`
	NProbs           int             `json:"n_probs,omitempty"`
	LogitBias        map[int]float32 `json:"logit_bias,omitempty"`
	IgnoreEos        bool            `json:"ignore_eos,omitempty"`
	Stop             []string        `json:"stop,omitempty"`
}

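// Predict decodes prevContext back into text, appends prompt, and streams a
// completion from the llama.cpp server, invoking fn for each generated chunk
// and once more with the final timings and context.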
func (llm *llama) Predict(ctx context.Context, prevContext []int, prompt string, fn func(api.GenerateResponse)) error {
	prevConvo, err := llm.Decode(ctx, prevContext)
	if err != nil {
		return err
	}

	var nextContext strings.Builder
	nextContext.WriteString(prevConvo)
	nextContext.WriteString(prompt)

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port)
	predReq := PredictRequest{
		Prompt:           nextContext.String(),
		Stream:           true,
		NPredict:         llm.NumPredict,
		NKeep:            llm.NumKeep,
		Temperature:      llm.Temperature,
		TopK:             llm.TopK,
		TopP:             llm.TopP,
		TfsZ:             llm.TFSZ,
		TypicalP:         llm.TypicalP,
		RepeatLastN:      llm.RepeatLastN,
		RepeatPenalty:    llm.RepeatPenalty,
		PresencePenalty:  llm.PresencePenalty,
		FrequencyPenalty: llm.FrequencyPenalty,
		Mirostat:         llm.Mirostat,
		MirostatTau:      llm.MirostatTau,
		MirostatEta:      llm.MirostatEta,
		PenalizeNl:       llm.PenalizeNewline,
		Stop:             llm.Stop,
	}
	data, err := json.Marshal(predReq)
	if err != nil {
		return fmt.Errorf("error marshaling data: %v", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Text()
			if line == "" {
				continue
			}

			// Read data from the server-side event stream
			if strings.HasPrefix(line, "data: ") {
				evt := line[6:]
				var p Prediction
				if err := json.Unmarshal([]byte(evt), &p); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				if p.Content != "" {
					fn(api.GenerateResponse{Response: p.Content})
					nextContext.WriteString(p.Content)
				}

				if p.Stop {
					embd, err := llm.Encode(ctx, nextContext.String())
					if err != nil {
						return fmt.Errorf("encoding context: %v", err)
					}

					fn(api.GenerateResponse{
						Done:               true,
						Context:            embd,
						PromptEvalCount:    p.PromptN,
						PromptEvalDuration: parseDurationMs(p.PromptMS),
						EvalCount:          p.PredictedN,
						EvalDuration:       parseDurationMs(p.PredictedMS),
					})

					return nil
				}
			}
		}
	}

	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

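// Encode tokenizes prompt using the llama.cpp server's /tokenize endpoint.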
func (llm *llama) Encode(ctx context.Context, prompt string) ([]int, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/tokenize", llm.Port)
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

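// Decode converts tokens back into text using the llama.cpp server's
// /detokenize endpoint.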
func (llm *llama) Decode(ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/detokenize", llm.Port)
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal encode response: %w", err)
	}

	// decoded content contains a leading whitespace
	decoded.Content, _ = strings.CutPrefix(decoded.Content, " ")

	return decoded.Content, nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

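// Embedding returns the embedding vector for input using the llama.cpp
// server's /embedding endpoint.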
func (llm *llama) Embedding(ctx context.Context, input string) ([]float64, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/embedding", llm.Port)
	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("POST embedding: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

// Ping checks that the server subprocess is still running and responding to requests
func (llm *llama) Ping(ctx context.Context) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, fmt.Sprintf("http://127.0.0.1:%d", llm.Port), nil)
	if err != nil {
		return fmt.Errorf("ping request: %w", err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("ping resp: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected ping status: %s", resp.Status)
	}
	return nil
}
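
// Usage sketch (illustrative only, not part of this file): a caller would pick
// the runners for a model format, start the server, and stream a completion.
// The "ggml" runner type and api.DefaultOptions() are assumptions about the
// surrounding codebase, not guarantees.
//
//	runners := chooseRunners("ggml")
//	llm, err := newLlama("/path/to/model.bin", nil, runners, api.DefaultOptions())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer llm.Close()
//
//	err = llm.Predict(context.Background(), nil, "Why is the sky blue?", func(r api.GenerateResponse) {
//		fmt.Print(r.Response)
//	})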