package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
	"github.com/ollama/ollama/server/envconfig"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64
}
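
// A typical caller drives a LlamaServer through this interface roughly as
// follows. This is an illustrative sketch only: gpu.GetGPUInfo and
// api.DefaultOptions are assumed from the surrounding packages, and the
// model path is a placeholder.
//
//	ggml, err := LoadModel("/path/to/model.gguf")
//	if err != nil {
//		return err
//	}
//	srv, err := NewLlamaServer(gpu.GetGPUInfo(), "/path/to/model.gguf", ggml, nil, nil, api.DefaultOptions())
//	if err != nil {
//		return err
//	}
//	defer srv.Close()
//	if err := srv.WaitUntilRunning(ctx); err != nil {
//		return err
//	}
//	err = srv.Completion(ctx, CompletionRequest{Prompt: "why is the sky blue?"}, func(r CompletionResponse) {
//		fmt.Print(r.Content)
//	})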

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	// TODO - this should be broken down by GPU
	estimatedVRAM uint64 // Estimated usage of VRAM by the loaded model

	sem *semaphore.Weighted
}

func LoadModel(model string) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	if opts.NumCtx > int(ggml.KV().ContextLength()) {
		slog.Warn("requested context length is greater than the model's training context window size", "requested", opts.NumCtx, "training size", ggml.KV().ContextLength())
	}

	if opts.NumCtx < 4 {
		opts.NumCtx = 4
	}

	cpuRunner := ""
	var estimatedVRAM uint64
	var systemMemory uint64
	if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {
		// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner
		cpuRunner = serverForCpu()
	} else {
		if gpus[0].Library == "metal" {
			memInfo, err := gpu.GetCPUMem()
			if err != nil {
				slog.Error("failed to lookup system memory", "error", err)
			} else {
				systemMemory = memInfo.TotalMemory
				slog.Debug("system memory", "total", format.HumanBytes2(systemMemory))
			}
		}
		var layers int
		layers, estimatedVRAM = EstimateGPULayers(gpus, ggml, projectors, opts)

		if gpus[0].Library == "metal" && estimatedVRAM > systemMemory {
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		} else if opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu" {
			opts.NumGPU = layers
		}
	}
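
	// For example (illustrative numbers only): on metal with 16 GiB of system
	// memory and a 20 GiB VRAM estimate, opts.NumGPU is forced to 0 above and
	// the model runs fully on CPU; with an 8 GiB estimate and NumGPU == -1
	// (auto), every offloadable layer reported by EstimateGPULayers is sent
	// to the GPU.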

	// Loop through potential servers
	finalErr := fmt.Errorf("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}
	if envconfig.Debug {
		params = append(params, "--log-format", "json")
	} else {
		params = append(params, "--log-disable")
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	numParallel := envconfig.NumParallel

	// TODO (jmorganca): multimodal models don't support parallel yet
	// see https://github.com/ollama/ollama/issues/4165
	if len(projectors) > 0 {
		numParallel = 1
		slog.Warn("multimodal models don't support parallel requests yet")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

	for i := 0; i < len(servers); i++ {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		// Find an available port; retry on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// append the server directory to LD_LIBRARY_PATH/PATH
		libraryPaths := []string{dir}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(filepath.SplitList(libraryPath), libraryPaths...)
		}

		// Note: we always put the dependency path first
		// since it holds the exact library versions we verified for AMD GPUs,
		// and after that we favor what the user had in their path
		// over our bundled runner directory
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server = server + ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:          port,
			cmd:           exec.Command(server, finalParams...),
			status:        NewStatusWriter(os.Stderr),
			options:       opts,
			estimatedVRAM: estimatedVRAM,
			sem:           semaphore.NewWeighted(int64(numParallel)),
		}

		libEnv := fmt.Sprintf("%s=%s", pathEnv, strings.Join(libraryPaths, string(filepath.ListSeparator)))
		s.cmd.Env = append(os.Environ(), libEnv)
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		// TODO - multiple GPU selection logic...
		key, val := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv()
		if key != "" {
			s.cmd.Env = append(s.cmd.Env, key+"="+val)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		// Log at debug as the environment is inherited and might contain sensitive information
		slog.Debug("subprocess", "environment", s.cmd.Env)

		if err = s.cmd.Start(); err != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// TODO - make sure this is all wired up correctly
		// if err = s.WaitUntilRunning(); err != nil {
		// 	slog.Error("error starting llama server", "server", servers[i], "error", err)
		// 	s.Close()
		// 	finalErr = err
		// 	continue
		// }
		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string `json:"status"`
	SlotsIdle       int    `json:"slots_idle"`
	SlotsProcessing int    `json:"slots_processing"`
	Error           string `json:"error"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, fmt.Errorf("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	// TODO we need to wire up a better way to detect hangs during model load and startup of the server
	expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	for {
		select {
		case <-ctx.Done():
			slog.Info("context expired before server started")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
			}
			if s.cmd.ProcessState != nil {
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
			}

			c, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
			status, err := s.getServerStatus(c)
			cancel() // cancel immediately rather than defer: this runs every tick and deferred cancels would accumulate until the function returns
			if err != nil && lastStatus != status {
				slog.Debug("server not yet available", "error", err)
				lastStatus = status
				continue
			}

			switch status {
			case ServerStatusLoadingModel:
				// TODO - this state never seems to happen with the current server.cpp code (bug?)
				// it doesn't respond to the health endpoint until after the model is loaded
				slog.Debug("loading model")
			case ServerStatusReady:
				slog.Debug(fmt.Sprintf("llama runner started in %f seconds", time.Since(start).Seconds()))
				return nil
			}
		}
	}
}

const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
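
// The grammar above is llama.cpp's GBNF format; it is attached to a
// completion request via request["grammar"] when req.Format == "json" (see
// Completion below). As a sketch of what it admits: {"a": [1, "two", true]}
// parses starting from the root rule, while a bare top-level string or
// number is rejected because root ::= object.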

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}
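
// Completion (below) streams chunks through the supplied callback. A
// minimal, illustrative consumer, assuming srv is an already-running
// LlamaServer, might accumulate the stream like this:
//
//	var sb strings.Builder
//	err := srv.Completion(ctx, CompletionRequest{Prompt: prompt}, func(r CompletionResponse) {
//		sb.WriteString(r.Content)
//		if r.Done {
//			slog.Debug("completion finished", "eval_count", r.EvalCount, "eval_duration", r.EvalDuration)
//		}
//	})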

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// allow at most 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Marshal the request with HTML escaping disabled so special characters pass through unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated; this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				fn(CompletionResponse{
					Done:               true,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}

		_ = s.cmd.Wait()

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimatedVRAM
}

func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}
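
// For reference, parseDurationMs(1234.5) yields 1.2345s. An equivalent
// arithmetic form, shown here only as a sketch, would avoid the string
// round trip:
//
//	func parseDurationMs(ms float64) time.Duration {
//		return time.Duration(ms * float64(time.Millisecond))
//	}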