package llm

import (
	"bufio"
	"bytes"
	"context"
	"embed"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"math/rand"
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/jmorganca/ollama/api"
)

//go:embed llama.cpp/*/build/*/bin/*
var llamaCppEmbed embed.FS

type ModelRunner struct {
	Path string // path to the model runner executable
}
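
// chooseRunners returns the runner binaries to try for the current OS, in
// priority order; the caller starts each in turn and keeps the first that
// works (e.g. the Metal build before the CPU build on macOS).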
func chooseRunners(workDir, runnerType string) []ModelRunner {
	buildPath := path.Join("llama.cpp", runnerType, "build")
	var runners []string

	// set the runners based on the OS
	// IMPORTANT: the order of the runners in the array is the priority order
	switch runtime.GOOS {
	case "darwin":
		runners = []string{
			path.Join(buildPath, "metal", "bin", "server"),
			path.Join(buildPath, "cpu", "bin", "server"),
		}
	case "linux":
		runners = []string{
			path.Join(buildPath, "cuda", "bin", "server"),
			path.Join(buildPath, "cpu", "bin", "server"),
		}
	case "windows":
		// TODO: select windows GPU runner here when available
		runners = []string{
			path.Join(buildPath, "cpu", "bin", "Release", "server.exe"),
		}
	default:
		log.Printf("unknown OS, running on CPU: %s", runtime.GOOS)
		runners = []string{
			path.Join(buildPath, "cpu", "bin", "server"),
		}
	}

	runnerAvailable := false // if no runner files are found in the embed, fail fast
	for _, r := range runners {
		// find all the files in the runner's bin directory
		files, err := fs.Glob(llamaCppEmbed, path.Join(path.Dir(r), "*"))
		if err != nil {
			// this is expected; ollama may be compiled without all runners packed in
			log.Printf("%s runner not found: %v", r, err)
			continue
		}

		for _, f := range files {
			runnerAvailable = true

			srcFile, err := llamaCppEmbed.Open(f)
			if err != nil {
				log.Fatalf("read llama runner %s: %v", f, err)
			}
			defer srcFile.Close()

			// create the directory in case it does not exist; filepath.Dir() converts the file path to the OS's format
			destPath := filepath.Join(workDir, filepath.Dir(f))
			if err := os.MkdirAll(destPath, 0o755); err != nil {
				log.Fatalf("create runner temp dir %s: %v", filepath.Dir(f), err)
			}

			// create the path to the destination file; filepath.Base() converts the file path to the OS's format
			destFile := filepath.Join(destPath, filepath.Base(f))

			_, err = os.Stat(destFile)
			switch {
			case errors.Is(err, os.ErrNotExist):
				destFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
				if err != nil {
					log.Fatalf("write llama runner %s: %v", f, err)
				}
				defer destFile.Close()

				if _, err := io.Copy(destFile, srcFile); err != nil {
					log.Fatalf("copy llama runner %s: %v", f, err)
				}
			case err != nil:
				log.Fatalf("stat llama runner %s: %v", f, err)
			}
		}
	}
	if !runnerAvailable {
		log.Fatalf("%s runner not found", runnerType)
	}

	// return the runners to try in priority order
	localRunnersByPriority := []ModelRunner{}
	for _, r := range runners {
		// clean the ModelRunner paths so that they match the OS we are running on
		localRunnersByPriority = append(localRunnersByPriority, ModelRunner{Path: filepath.Clean(path.Join(workDir, r))})
	}

	return localRunnersByPriority
}

type llamaModel struct {
	hyperparameters llamaHyperparameters
}

func (llm *llamaModel) ModelFamily() string {
	return "llama"
}

func llamaModelType(numLayer uint32) string {
	switch numLayer {
	case 26:
		return "3B"
	case 32:
		return "7B"
	case 40:
		return "13B"
	case 48:
		return "34B"
	case 60:
		return "30B"
	case 80:
		return "65B"
	default:
		return "unknown"
	}
}

func (llm *llamaModel) ModelType() string {
	return llamaModelType(llm.hyperparameters.NumLayer)
}

func (llm *llamaModel) FileType() string {
	return fileType(llm.hyperparameters.FileType)
}

func (llm *llamaModel) NumLayers() int64 {
	return int64(llm.hyperparameters.NumLayer)
}

type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32

	// NumMult is the rounding multiple used to size the feed-forward layer.
	NumMult uint32

	// NumHead is the number of attention heads.
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32

	// NumRot is the number of dimensions used for rotary position embeddings.
	NumRot uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType uint32
}

type Running struct {
	Port   int
	Cmd    *exec.Cmd
	Cancel context.CancelFunc
}

type llama struct {
	api.Options
	Running
}

var errNoGPU = errors.New("nvidia-smi command failed")

// CheckVRAM returns the total VRAM in MiB across all NVIDIA GPUs on Linux machines, as reported by nvidia-smi
func CheckVRAM() (int64, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.total", "--format=csv,noheader,nounits")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNoGPU
	}

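	// with --format=csv,noheader,nounits, nvidia-smi prints one integer MiB
	// value per GPU, one per line, e.g. for a hypothetical two-GPU machine:
	//
	//	24576
	//	8192
	//
	// summing the lines gives the total VRAM across all GPUs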
	var total int64
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}

		total += vram
	}

	return total, nil
}

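// NumGPU returns the number of model layers to offload to the GPU: the
// user's explicit opts.NumGPU when set, a VRAM-based estimate on Linux, and
// 1 otherwise (which enables Metal on macOS).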
func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
	if opts.NumGPU != -1 {
		return opts.NumGPU
	}
	if runtime.GOOS == "linux" {
		vramMib, err := CheckVRAM()
		if err != nil {
			if !errors.Is(err, errNoGPU) {
				log.Print(err.Error())
			}
			// nvidia driver not installed or no nvidia GPU found
			return 0
		}

		totalVramBytes := vramMib * 1024 * 1024 // 1 MiB = 1024^2 bytes

		// Calculate bytes per layer
		// TODO: this is a rough heuristic, better would be to calculate this based on number of layers and context size
		bytesPerLayer := fileSizeBytes / numLayer
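		// illustrative numbers: a ~6.7 GiB 13B q4_0 file with 40 layers is
		// ~172 MiB per layer, so 8192 MiB of VRAM fits roughly 47 layers
		// (llama.cpp clamps the request to the model's actual layer count)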

		// max number of layers we can fit in VRAM
		layers := int(totalVramBytes / bytesPerLayer)
		log.Printf("%d MiB VRAM available, loading up to %d GPU layers", vramMib, layers)

		return layers
	}
	// default to enabling Metal on macOS
	return 1
}

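// newLlama starts a llama.cpp server subprocess for the given model, trying
// each runner in priority order, and returns a client once the server is
// responding; at most one LoRA adapter may be supplied.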
func newLlama(model string, adapters []string, runners []ModelRunner, numLayers int64, opts api.Options) (*llama, error) {
	fileInfo, err := os.Stat(model)
	if err != nil {
		return nil, err
	}

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase),
		"--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--n-gpu-layers", fmt.Sprintf("%d", NumGPU(numLayers, fileInfo.Size(), opts)),
		"--embedding",
	}

	if opts.NumGQA > 0 {
		params = append(params, "--gqa", fmt.Sprintf("%d", opts.NumGQA))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}
	if opts.UseMLock {
		params = append(params, "--mlock")
	}
	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}
	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	// start the llama.cpp server with a retry in case the port is already in use
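	// the assembled invocation looks roughly like this (flags and values are
	// illustrative; they vary with the model and options):
	//
	//	server --model /path/to/model.bin --ctx-size 2048 --batch-size 512 \
	//	  --n-gpu-layers 32 --embedding --port 51234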
	for _, runner := range runners {
		if _, err := os.Stat(runner.Path); err != nil {
			log.Printf("llama runner not found: %v", err)
			continue
		}

		port := rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		ctx, cancel := context.WithCancel(context.Background())
		cmd := exec.CommandContext(
			ctx,
			runner.Path,
			append(params, "--port", strconv.Itoa(port))...,
		)
		cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", filepath.Dir(runner.Path)))
		cmd.Stdout = os.Stderr
		cmd.Stderr = os.Stderr

		llm := &llama{Options: opts, Running: Running{Port: port, Cmd: cmd, Cancel: cancel}}

		log.Print("starting llama runner")
		if err := llm.Cmd.Start(); err != nil {
			log.Printf("error starting the external llama runner: %v", err)
			continue
		}

		// monitor the runner; Wait blocks until the process exits, so run it in a goroutine
		go func() {
			err := llm.Cmd.Wait() // this will block until the command exits
			if err != nil {
				log.Printf("llama runner exited with error: %v", err)
			} else {
				log.Printf("llama runner exited")
			}
		}()

		if err := waitForServer(llm); err != nil {
			log.Printf("error starting llama runner: %v", err)
			llm.Close()
			// try again
			continue
		}

		// server started successfully
		return llm, nil
	}

	return nil, fmt.Errorf("failed to start a llama runner")
}

func waitForServer(llm *llama) error {
	// wait for the server to start responding
	start := time.Now()
	expiresAt := time.Now().Add(2 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	log.Print("waiting for llama runner to start responding")
	for range ticker.C {
		if time.Now().After(expiresAt) {
			return fmt.Errorf("llama runner did not start within allotted time, retrying")
		}

		// check if the server process has terminated
		if llm.Cmd.ProcessState != nil && llm.Cmd.ProcessState.Exited() {
			return fmt.Errorf("llama runner process has terminated")
		}

		if err := llm.Ping(context.Background()); err == nil {
			break
		}
	}

	log.Printf("llama runner started in %f seconds", time.Since(start).Seconds())
	return nil
}

func (llm *llama) Close() {
	llm.Cancel()
}

func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

type GenerationSettings struct {
	FrequencyPenalty float64       `json:"frequency_penalty"`
	IgnoreEOS        bool          `json:"ignore_eos"`
	LogitBias        []interface{} `json:"logit_bias"`
	Mirostat         int           `json:"mirostat"`
	MirostatEta      float64       `json:"mirostat_eta"`
	MirostatTau      float64       `json:"mirostat_tau"`
	Model            string        `json:"model"`
	NCtx             int           `json:"n_ctx"`
	NKeep            int           `json:"n_keep"`
	NPredict         int           `json:"n_predict"`
	NProbs           int           `json:"n_probs"`
	PenalizeNl       bool          `json:"penalize_nl"`
	PresencePenalty  float64       `json:"presence_penalty"`
	RepeatLastN      int           `json:"repeat_last_n"`
	RepeatPenalty    float64       `json:"repeat_penalty"`
	Seed             uint32        `json:"seed"`
	Stop             []string      `json:"stop"`
	Stream           bool          `json:"stream"`
	Temp             float64       `json:"temp"`
	TfsZ             float64       `json:"tfs_z"`
	TopK             int           `json:"top_k"`
	TopP             float64       `json:"top_p"`
	TypicalP         float64       `json:"typical_p"`
}

type Timings struct {
	PredictedN  int     `json:"predicted_n"`
	PredictedMS float64 `json:"predicted_ms"`
	PromptN     int     `json:"prompt_n"`
	PromptMS    float64 `json:"prompt_ms"`
}

type Prediction struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings `json:"timings"`
}

type PredictRequest struct {
	Prompt           string   `json:"prompt"`
	Stream           bool     `json:"stream"`
	NPredict         int      `json:"n_predict"`
	NKeep            int      `json:"n_keep"`
	Temperature      float32  `json:"temperature"`
	TopK             int      `json:"top_k"`
	TopP             float32  `json:"top_p"`
	TfsZ             float32  `json:"tfs_z"`
	TypicalP         float32  `json:"typical_p"`
	RepeatLastN      int      `json:"repeat_last_n"`
	RepeatPenalty    float32  `json:"repeat_penalty"`
	PresencePenalty  float32  `json:"presence_penalty"`
	FrequencyPenalty float32  `json:"frequency_penalty"`
	Mirostat         int      `json:"mirostat"`
	MirostatTau      float32  `json:"mirostat_tau"`
	MirostatEta      float32  `json:"mirostat_eta"`
	PenalizeNl       bool     `json:"penalize_nl"`
	Seed             int      `json:"seed"`
	Stop             []string `json:"stop,omitempty"`
}

func (llm *llama) Predict(ctx context.Context, prevContext []int, prompt string, fn func(api.GenerateResponse)) error {
	prevConvo, err := llm.Decode(ctx, prevContext)
	if err != nil {
		return err
	}

	var nextContext strings.Builder
	nextContext.WriteString(prevConvo)
	nextContext.WriteString(prompt)

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port)
	predReq := PredictRequest{
		Prompt:           nextContext.String(),
		Stream:           true,
		NPredict:         llm.NumPredict,
		NKeep:            llm.NumKeep,
		Temperature:      llm.Temperature,
		TopK:             llm.TopK,
		TopP:             llm.TopP,
		TfsZ:             llm.TFSZ,
		TypicalP:         llm.TypicalP,
		RepeatLastN:      llm.RepeatLastN,
		RepeatPenalty:    llm.RepeatPenalty,
		PresencePenalty:  llm.PresencePenalty,
		FrequencyPenalty: llm.FrequencyPenalty,
		Mirostat:         llm.Mirostat,
		MirostatTau:      llm.MirostatTau,
		MirostatEta:      llm.MirostatEta,
		PenalizeNl:       llm.PenalizeNewline,
		Seed:             llm.Seed,
		Stop:             llm.Stop,
	}

	data, err := json.Marshal(predReq)
	if err != nil {
		return fmt.Errorf("error marshaling data: %v", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Text()
			if line == "" {
				continue
			}

			// Read data from the server-side event stream
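			// each event is one line whose payload mirrors the Prediction
			// struct above, e.g. (values illustrative):
			//
			//	data: {"content":" world","stop":false}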
			if strings.HasPrefix(line, "data: ") {
				evt := line[6:]
				var p Prediction
				if err := json.Unmarshal([]byte(evt), &p); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				if p.Content != "" {
					fn(api.GenerateResponse{Response: p.Content})
					nextContext.WriteString(p.Content)
				}

				if p.Stop {
					embd, err := llm.Encode(ctx, nextContext.String())
					if err != nil {
						return fmt.Errorf("encoding context: %v", err)
					}

					fn(api.GenerateResponse{
						Done:               true,
						Context:            embd,
						PromptEvalCount:    p.PromptN,
						PromptEvalDuration: parseDurationMs(p.PromptMS),
						EvalCount:          p.PredictedN,
						EvalDuration:       parseDurationMs(p.PredictedMS),
					})

					return nil
				}
			}
		}
	}

	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}
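
// A round trip against the runner's /tokenize endpoint looks like this
// (token IDs are illustrative; they depend on the model's vocabulary):
//
//	request:  {"content": "hello world"}
//	response: {"tokens": [15043, 3186]}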

func (llm *llama) Encode(ctx context.Context, prompt string) ([]int, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/tokenize", llm.Port)
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (llm *llama) Decode(ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/detokenize", llm.Port)
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal encode response: %w", err)
	}

	// the decoded content contains a leading whitespace; trim it
	decoded.Content, _ = strings.CutPrefix(decoded.Content, " ")

	return decoded.Content, nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (llm *llama) Embedding(ctx context.Context, input string) ([]float64, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/embedding", llm.Port)
	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("POST embedding: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

// Ping checks that the server subprocess is still running and responding to requests
func (llm *llama) Ping(ctx context.Context) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, fmt.Sprintf("http://127.0.0.1:%d", llm.Port), nil)
	if err != nil {
		return fmt.Errorf("ping request: %w", err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("ping resp: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected ping status: %s", resp.Status)
	}
	return nil
}