package llm

import (
	"bufio"
	"bytes"
	"context"
	"embed"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"math/rand"
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/format"
)

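// Runner binaries are built from llama.cpp and embedded into the ollama
// binary at compile time; they are extracted into a working directory and
// executed as subprocesses at runtime.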
//go:embed llama.cpp/*/build/*/bin/*
var llamaCppEmbed embed.FS

type ModelRunner struct {
	Path        string // path to the model runner executable
	Accelerated bool
}

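// chooseRunners extracts the embedded runner binaries for runnerType into
// workDir and returns them as local paths in priority order (accelerated
// runners first).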
func chooseRunners(workDir, runnerType string) []ModelRunner {
	buildPath := path.Join("llama.cpp", runnerType, "build")
	var runners []ModelRunner

	// set the runners based on the OS
	// IMPORTANT: the order of the runners in the array is the priority order
	switch runtime.GOOS {
	case "darwin":
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "metal", "bin", "ollama-runner")},
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	case "linux":
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cuda", "bin", "ollama-runner"), Accelerated: true},
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	case "windows":
		// TODO: select windows GPU runner here when available
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "Release", "ollama-runner.exe")},
		}
	default:
		log.Printf("unknown OS, running on CPU: %s", runtime.GOOS)
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	}

	runnerAvailable := false // if no runner files are found in the embed, this flag will cause a fast fail
	for _, r := range runners {
		// find all the files in the runner's bin directory
		files, err := fs.Glob(llamaCppEmbed, path.Join(path.Dir(r.Path), "*"))
		if err != nil {
			// this is expected; ollama may be compiled without all runners packed in
			log.Printf("%s runner not found: %v", r.Path, err)
			continue
		}

		for _, f := range files {
			runnerAvailable = true

			srcFile, err := llamaCppEmbed.Open(f)
			if err != nil {
				log.Fatalf("read llama runner %s: %v", f, err)
			}
			defer srcFile.Close()

			// create the directory in case it does not exist; filepath.Dir() converts the file path to the OS's format
			destPath := filepath.Join(workDir, filepath.Dir(f))
			if err := os.MkdirAll(destPath, 0o755); err != nil {
				log.Fatalf("create runner temp dir %s: %v", filepath.Dir(f), err)
			}

			// create the path to the destination file; filepath.Base() converts the file path to the OS's format
			destFile := filepath.Join(destPath, filepath.Base(f))

			_, err = os.Stat(destFile)
			switch {
			case errors.Is(err, os.ErrNotExist):
				destFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
				if err != nil {
					log.Fatalf("write llama runner %s: %v", f, err)
				}
				defer destFile.Close()

				if _, err := io.Copy(destFile, srcFile); err != nil {
					log.Fatalf("copy llama runner %s: %v", f, err)
				}
			case err != nil:
				log.Fatalf("stat llama runner %s: %v", f, err)
			}
		}
	}
	if !runnerAvailable {
		log.Fatalf("%s runner not found", runnerType)
	}

	// return the runners to try in priority order
	localRunnersByPriority := []ModelRunner{}
	for _, r := range runners {
		// clean the ModelRunner paths so that they match the OS we are running on
		localRunnersByPriority = append(localRunnersByPriority, ModelRunner{
			Path:        filepath.Clean(path.Join(workDir, r.Path)),
			Accelerated: r.Accelerated,
		})
	}

	return localRunnersByPriority
}

type llamaModel struct {
	hyperparameters llamaHyperparameters
}

func (llm *llamaModel) ModelFamily() string {
	return "llama"
}

func llamaModelType(numLayer uint32) string {
	switch numLayer {
	case 26:
		return "3B"
	case 32:
		return "7B"
	case 40:
		return "13B"
	case 48:
		return "34B"
	case 60:
		return "30B"
	case 80:
		return "65B"
	default:
		return "unknown"
	}
}

func (llm *llamaModel) ModelType() string {
	return llamaModelType(llm.hyperparameters.NumLayer)
}

func (llm *llamaModel) FileType() string {
	return fileType(llm.hyperparameters.FileType)
}

func (llm *llamaModel) NumLayers() int64 {
	return int64(llm.hyperparameters.NumLayer)
}

type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType uint32
}

type Running struct {
	Port          int
	Cmd           *exec.Cmd
	Cancel        context.CancelFunc
	exitOnce      sync.Once
	exitCh        chan error // channel to receive the exit status of the subprocess
	*StatusWriter            // captures error messages from the llama runner process
}

type llama struct {
	api.Options
	Running
}

var errNoGPU = errors.New("nvidia-smi command failed")

// CheckVRAM returns the free VRAM in bytes on Linux machines with NVIDIA GPUs
func CheckVRAM() (int64, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNoGPU
	}

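	// with --format=csv,noheader,nounits, nvidia-smi prints one integer per
	// GPU: that GPU's free memory in MiB; sum the values across all GPUs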
	var freeMiB int64
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}

		freeMiB += vram
	}

	freeBytes := freeMiB * 1024 * 1024
	if freeBytes < 2*format.GigaByte {
		log.Printf("less than 2 GB VRAM available, falling back to CPU only")
		freeBytes = 0
	}

	return freeBytes, nil
}

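// NumGPU returns the number of model layers to off-load to the GPU: the
// user's explicit num_gpu setting when given, otherwise an estimate based on
// free VRAM on Linux, or 1 on macOS (which enables Metal).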
func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
	if opts.NumGPU != -1 {
		return opts.NumGPU
	}
	if runtime.GOOS == "linux" {
		freeBytes, err := CheckVRAM()
		if err != nil {
			if !errors.Is(err, errNoGPU) {
				log.Print(err.Error())
			}
			// nvidia driver not installed or no nvidia GPU found
			return 0
		}

		/*
		 Calculate bytes per layer: roughly the size of the model file divided by the number of layers.
		 Both the model weights and the KV cache have to fit in VRAM, so leave headroom rather than filling it completely.
		*/
		bytesPerLayer := fileSizeBytes / numLayer
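		// illustrative arithmetic with hypothetical numbers: a ~3.8 GB model
		// with 32 layers gives ~120 MB per layer; with 8 GB of free VRAM,
		// 8192/120 = 68 layers would fit, scaled to 75% below => 51 layers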

		// off-load at most 75% of the layers that would fit in available VRAM; off-loading too many layers to the GPU can cause OOM errors
		layers := int(freeBytes/bytesPerLayer) * 3 / 4
		log.Printf("%d MB VRAM available, loading up to %d GPU layers", freeBytes/(1024*1024), layers)

		return layers
	}
	// default to enabling Metal on macOS
	return 1
}

// StatusWriter is a writer that captures error messages from the llama runner process
type StatusWriter struct {
	ErrCh      chan error
	LastErrMsg string
}

func NewStatusWriter() *StatusWriter {
	return &StatusWriter{
		ErrCh: make(chan error, 1),
	}
}

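// Write scans runner output for "error:" and "CUDA error" markers, records
// the most recent message, and forwards all output to stderr unchanged.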
func (w *StatusWriter) Write(b []byte) (int, error) {
	var errMsg string
	if _, after, ok := bytes.Cut(b, []byte("error:")); ok {
		errMsg = string(bytes.TrimSpace(after))
	} else if _, after, ok := bytes.Cut(b, []byte("CUDA error")); ok {
		errMsg = string(bytes.TrimSpace(after))
	}

	if errMsg != "" {
		w.LastErrMsg = errMsg
		w.ErrCh <- fmt.Errorf("llama runner: %s", errMsg)
	}

	return os.Stderr.Write(b)
}

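// newLlama starts a llama.cpp server subprocess for the given model, trying
// each candidate runner in priority order, and returns a handle to it once
// the server is responding to requests.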
func newLlama(model string, adapters []string, runners []ModelRunner, numLayers int64, opts api.Options) (*llama, error) {
	fileInfo, err := os.Stat(model)
	if err != nil {
		return nil, err
	}

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	numGPU := NumGPU(numLayers, fileInfo.Size(), opts)
	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase),
		"--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--n-gpu-layers", fmt.Sprintf("%d", numGPU),
		"--embedding",
	}

	if opts.NumGQA > 0 {
		params = append(params, "--gqa", fmt.Sprintf("%d", opts.NumGQA))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}
	if opts.UseMLock {
		params = append(params, "--mlock")
	}
	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}
	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	var runnerErr error

	// start the llama.cpp server with a retry in case the port is already in use
	for _, runner := range runners {
		if runner.Accelerated && numGPU == 0 {
			log.Printf("skipping accelerated runner because num_gpu=0")
			continue
		}

		if _, err := os.Stat(runner.Path); err != nil {
			log.Printf("llama runner not found: %v", err)
			continue
		}

		port := rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		ctx, cancel := context.WithCancel(context.Background())
		cmd := exec.CommandContext(
			ctx,
			runner.Path,
			append(params, "--port", strconv.Itoa(port))...,
		)
		cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", filepath.Dir(runner.Path)))
		cmd.Stdout = os.Stderr
		statusWriter := NewStatusWriter()
		cmd.Stderr = statusWriter

		llm := &llama{Options: opts, Running: Running{Port: port, Cmd: cmd, Cancel: cancel, exitCh: make(chan error)}}

		log.Print("starting llama runner")
		if err := llm.Cmd.Start(); err != nil {
			log.Printf("error starting the external llama runner: %v", err)
			continue
		}

		// monitor the llama runner process and signal when it exits
		go func() {
			err := llm.Cmd.Wait()
			// default to printing the exit message of the command process; it will probably just say 'exit status 1'
			errMsg := err.Error()
			// try to set a better error message if llama runner logs captured an error
			if statusWriter.LastErrMsg != "" {
				errMsg = statusWriter.LastErrMsg
			}
			log.Println(errMsg)
			// llm.Cmd.Wait() can only be called once; use this exit channel to signal that the process has exited
			llm.exitOnce.Do(func() {
				close(llm.exitCh)
			})
		}()

		if err := waitForServer(llm); err != nil {
			log.Printf("error starting llama runner: %v", err)
			llm.Close()

			// default the runnerErr to the error returned by the most recent llama runner process
			runnerErr = err

			// capture the error directly from the runner process, if any
			select {
			case runnerErr = <-statusWriter.ErrCh:
			default:
				// the runner process probably timed out
			}

			// try again
			continue
		}

		// server started successfully
		return llm, nil
	}

	if runnerErr != nil {
		// this is the error returned from the llama runner process that failed most recently
		return nil, runnerErr
	}

	return nil, fmt.Errorf("failed to start a llama runner")
}

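// waitForServer polls the runner until it answers a ping, giving up if the
// subprocess exits or a three-minute deadline passes.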
func waitForServer(llm *llama) error {
	start := time.Now()
	expiresAt := time.Now().Add(3 * time.Minute) // be generous with timeout; large models can take a while to load
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	log.Print("waiting for llama runner to start responding")
	for {
		select {
		case <-llm.exitCh:
			// failed to start subprocess
			return fmt.Errorf("llama runner process has terminated")
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				return fmt.Errorf("timed out waiting for llama runner to start")
			}

			if err := llm.Ping(context.Background()); err == nil {
				// success
				log.Printf("llama runner started in %f seconds", time.Since(start).Seconds())
				return nil
			}
		}
	}
}

func (llm *llama) Close() {
	// signal the sub-process to terminate
	llm.Cancel()

	// wait for the command to exit to prevent race conditions with the next run
	<-llm.exitCh

	if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
		log.Printf("llama runner stopped with error: %v", llm.StatusWriter.LastErrMsg)
	} else {
		log.Print("llama runner stopped successfully")
	}
}

func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

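// prediction is the subset of a llama.cpp server streaming completion
// response that ollama consumes.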
type prediction struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

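// maxBufferSize caps how large a single line of the streamed prediction
// response may grow before the scanner gives up.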
const maxBufferSize = 512 * format.KiloByte

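// Predict streams a completion from the llama.cpp server, calling fn with
// each piece of generated content; prevContext is decoded and prepended to
// prompt so the conversation continues where it left off.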
func (llm *llama) Predict(ctx context.Context, prevContext []int, prompt string, fn func(api.GenerateResponse)) error {
	prevConvo, err := llm.Decode(ctx, prevContext)
	if err != nil {
		return err
	}

	// Remove leading spaces from prevConvo if present
	prevConvo = strings.TrimPrefix(prevConvo, " ")

	var nextContext strings.Builder
	nextContext.WriteString(prevConvo)
	nextContext.WriteString(prompt)

	request := map[string]any{
		"prompt":            nextContext.String(),
		"stream":            true,
		"n_predict":         llm.NumPredict,
		"n_keep":            llm.NumKeep,
		"temperature":       llm.Temperature,
		"top_k":             llm.TopK,
		"top_p":             llm.TopP,
		"tfs_z":             llm.TFSZ,
		"typical_p":         llm.TypicalP,
		"repeat_last_n":     llm.RepeatLastN,
		"repeat_penalty":    llm.RepeatPenalty,
		"presence_penalty":  llm.PresencePenalty,
		"frequency_penalty": llm.FrequencyPenalty,
		"mirostat":          llm.Mirostat,
		"mirostat_tau":      llm.MirostatTau,
		"mirostat_eta":      llm.MirostatEta,
		"penalize_nl":       llm.PenalizeNewline,
		"seed":              llm.Seed,
		"stop":              llm.Stop,
	}

	// Marshal the request ourselves so that special characters are not HTML-escaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(resp.Body)
	// increase the buffer size to avoid running out of space
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)
	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			if evt, ok := bytes.CutPrefix(line, []byte("data: ")); ok {
				var p prediction
				if err := json.Unmarshal(evt, &p); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				if p.Content != "" {
					fn(api.GenerateResponse{Response: p.Content})
					nextContext.WriteString(p.Content)
				}

				if p.Stop {
					embd, err := llm.Encode(ctx, nextContext.String())
					if err != nil {
						return fmt.Errorf("encoding context: %v", err)
					}

					fn(api.GenerateResponse{
						Done:               true,
						Context:            embd,
						PromptEvalCount:    p.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
						EvalCount:          p.Timings.PredictedN,
						EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
					})

					return nil
				}
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			// this means the llama runner subprocess crashed
			llm.Close()
			if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
				return fmt.Errorf("llama runner exited: %v", llm.StatusWriter.LastErrMsg)
			}
			return fmt.Errorf("llama runner exited, you may not have enough available memory to run this model")
		}
		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

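// Encode converts prompt text into token ids using the runner's /tokenize
// endpoint.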
func (llm *llama) Encode(ctx context.Context, prompt string) ([]int, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/tokenize", llm.Port)
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

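// Decode converts token ids back into text using the runner's /detokenize
// endpoint.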
func (llm *llama) Decode(ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/detokenize", llm.Port)
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

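// Embedding returns the embedding vector for input using the runner's
// /embedding endpoint.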
func (llm *llama) Embedding(ctx context.Context, input string) ([]float64, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/embedding", llm.Port)
	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("POST embedding: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}

// Ping checks that the server subprocess is still running and responding to requests
func (llm *llama) Ping(ctx context.Context) error {
	resp, err := http.Head(fmt.Sprintf("http://127.0.0.1:%d", llm.Port))
	if err != nil {
		return fmt.Errorf("ping resp: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected ping status: %s", resp.Status)
	}
	return nil
}