package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embed(ctx context.Context, input []string) (*EmbedResponse, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration   // Record how long it took the model to load
	loadProgress float32

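	// sem gates concurrent requests to the runner: Completion acquires one
	// slot per call, Embed acquires up to numParallel slots.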
	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
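//
// A minimal sketch of a call site (the path is hypothetical):
//
//	ggml, err := LoadModel("/models/example.gguf", 0) // 0 selects the default maxArraySize of 1024
//	if err != nil {
//		return err
//	}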
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
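//
// A sketch of a typical call (arguments are illustrative, not prescriptive):
//
//	server, err := NewLlamaServer(gpus, "/models/example.gguf", ggml, nil, nil, opts, 4)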
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemTotalMemory uint64
	var systemFreeMemory uint64
	var systemSwapFreeMemory uint64

	systemMemInfo, err := gpu.GetCPUMem()
	if err != nil {
		slog.Error("failed to lookup system memory", "error", err)
	} else {
		systemTotalMemory = systemMemInfo.TotalMemory
		systemFreeMemory = systemMemInfo.FreeMemory
		systemSwapFreeMemory = systemMemInfo.FreeSwap
		slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))
	}

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = gpu.GetCPUInfo()
	}
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = serverForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpus = gpu.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
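		// The CPU-resident portion of the load is whatever the estimate says
		// will not fit in VRAM; it must fit in free RAM plus free swap.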
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	estimate.log()

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := getAvailableServers()
	if len(availableServers) == 0 {
		if runtime.GOOS != "windows" {
			slog.Warn("llama server binary disappeared, reinitializing payloads")
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
			availableServers = getAvailableServers()
		} else {
			return nil, finalErr
		}
	}
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary()
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	flashAttnEnabled := envconfig.FlashAttention()

	for _, g := range gpus {
		// only cuda (compute capability 7+) and metal support flash attention
		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
			flashAttnEnabled = false
		}

		// mmap has issues with partial offloading on metal
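		// (BlockCount()+1 counts the output layer on top of the transformer
		// blocks, matching the totalLayers computation below)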
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	if flashAttnEnabled {
		params = append(params, "--flash-attn")
	}

	// Windows CUDA should not use mmap for best performance
	// On Linux, with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

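	// On NUMA systems, prefer an explicit numactl policy on Linux when the
	// binary is present; otherwise use llama.cpp's built-in "distribute" mode.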
	if gpu.IsNUMA() {
		numaMode := "distribute"
		if runtime.GOOS == "linux" {
			if _, err := exec.LookPath("numactl"); err == nil {
				numaMode = "numactl"
			}
		}
		params = append(params, "--numa", numaMode)
	}

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	for i := range len(servers) {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			gpus = gpu.GetCPUInfo()
		}

		// Find an available port; retry on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH and the parent dir for common dependencies
		libraryPaths := []string{dir, filepath.Dir(dir)}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append the inherited path after our runner directory so our
			// bundled library dependencies are favored over system libraries
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			estimate:    estimate,
			numParallel: numParallel,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Debug("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file, 0)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's already exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
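			// Back off briefly; with 10 retries at 5ms each, a caller waits
			// at most ~50ms for a slot before giving up.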
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := 5 * time.Minute            // If no progress happens
	finalLoadDuration := 5 * time.Minute        // After we hit 100%, give the runner more time to come online
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
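		// Note: these deferred cancels accumulate until WaitUntilRunning
		// returns, but each health probe below is still bounded to 200ms.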
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(finalLoadDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

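// jsonGrammar is a GBNF grammar passed to the llama.cpp server as the
// "grammar" request field when JSON output is requested; it constrains
// sampling to syntactically valid JSON.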
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

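// maxBufferSize caps a single line read from the runner's streaming
// completion response.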
const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options *api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}
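	// For example, a request with an unbounded NumPredict against a
	// 2048-token context is clamped to 10 * 2048 = 20480 tokens.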

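	// These keys follow the llama.cpp server's /completion request schema;
	// the payload is POSTed to that endpoint below.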
	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"min_p":             req.Options.MinP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Marshal the request with HTML escaping disabled so special characters pass through unchanged.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated; this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbedRequest struct {
	Content []string `json:"content"`
}

type EmbedResponse struct {
	Embedding       [][]float32 `json:"embedding"`
	PromptEvalCount int         `json:"prompt_n"`
}

func (s *llmServer) Embed(ctx context.Context, input []string) (*EmbedResponse, error) {
	// each input will use a slot, so we need to acquire the semaphore for
	// the number of inputs up to numParallel
	slots := int64(min(len(input), s.numParallel))
	if err := s.sem.Acquire(ctx, slots); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(slots)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbedRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbedResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embed response: %w", err)
	}

	return &e, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			return s.estimate.GPUSizes[i]
		}
	}
	return 0
}

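// parseDurationMs converts the millisecond timings reported by the runner
// into a time.Duration, e.g. parseDurationMs(1500) == 1500 * time.Millisecond.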
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}