package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embed(ctx context.Context, input []string) (*EmbedResponse, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration   // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemTotalMemory uint64
	var systemFreeMemory uint64
	var systemSwapFreeMemory uint64

	systemMemInfo, err := gpu.GetCPUMem()
	if err != nil {
		slog.Error("failed to lookup system memory", "error", err)
	} else {
		systemTotalMemory = systemMemInfo.TotalMemory
		systemFreeMemory = systemMemInfo.FreeMemory
		systemSwapFreeMemory = systemMemInfo.FreeSwap
		slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))
	}

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = gpu.GetCPUInfo()
	}
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = serverForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpus = gpu.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On linux, over-allocating CPU memory will almost always result in an error
	if runtime.GOOS == "linux" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", available, "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	estimate.log()

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := getAvailableServers()
	if len(availableServers) == 0 {
		if runtime.GOOS != "windows" {
			slog.Warn("llama server binary disappeared, reinitializing payloads")
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
			availableServers = getAvailableServers()
		} else {
			return nil, finalErr
		}
	}
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary()
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	flashAttnEnabled := envconfig.FlashAttention()

	for _, g := range gpus {
		// only cuda (compute capability 7+) and metal support flash attention
		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
			flashAttnEnabled = false
		}

		// mmap has issues with partial offloading on metal
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	if flashAttnEnabled {
Sam's avatar
Sam committed
242
243
244
		params = append(params, "--flash-attn")
	}

	// Windows CUDA should not use mmap for best performance
	// Linux with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if gpu.IsNUMA() {
		numaMode := "distribute"
		if runtime.GOOS == "linux" {
			if _, err := exec.LookPath("numactl"); err == nil {
				numaMode = "numactl"
			}
		}
		params = append(params, "--numa", numaMode)
	}

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	for i := range len(servers) {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			gpus = gpu.GetCPUInfo()
		}

		// Find an available port, retry on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed ", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH and the parent dir for common dependencies
		libraryPaths := []string{dir, filepath.Dir(dir)}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			estimate:    estimate,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w.  %s may have noexec set.  Set OLLAMA_TMPDIR for server to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Debug("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

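// projectorMemoryRequirements sums the tensor sizes in a multimodal projector
// file to estimate its memory footprint; it returns 0 if the file cannot be
// read or decoded.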
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file, 0)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

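// ServerStatus is the coarse state of the runner as reported by its health endpoint.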
type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

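// getServerStatus queries the runner's /health endpoint and maps the reported
// state onto a ServerStatus, failing fast if the subprocess has already exited.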
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

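// Ping performs a single health check against the runner and returns any error.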
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

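// WaitUntilRunning blocks until the runner reports ready, the context is
// cancelled, or the load stalls. The stall timer is reset whenever load
// progress advances, and a final grace period is granted once progress
// reaches 100%.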
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := 5 * time.Minute            // If no progress happens
	finalLoadDuration := 5 * time.Minute        // After we hit 100%, give the runner more time to come online
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(finalLoadDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

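// jsonGrammar is a GBNF grammar passed to the runner to constrain sampling to
// valid JSON when the request asks for JSON output.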
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

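// completion is a single streamed chunk from the runner's /completion endpoint.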
type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options *api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

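// Completion streams a completion request to the runner and invokes fn for
// each response chunk. A weighted semaphore caps concurrent requests at the
// configured parallelism, and generation is aborted if the same token repeats
// more than 30 times in a row.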
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running on forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"min_p":             req.Options.MinP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should response in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbedRequest struct {
	Content []string `json:"content"`
}

type EmbedResponse struct {
	Embedding       [][]float32 `json:"embedding"`
	PromptEvalCount int         `json:"prompt_n"`
}

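// Embed sends a batch of inputs to the runner's /embedding endpoint and
// returns one embedding per input.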
func (s *llmServer) Embed(ctx context.Context, input []string) (*EmbedResponse, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbedRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbedResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return &e, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

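// Tokenize converts text into token IDs using the runner's /tokenize endpoint.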
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

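// Detokenize converts token IDs back into text using the runner's /detokenize endpoint.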
func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal encode response: %w", err)
	}

	return decoded.Content, nil
}

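// Close kills the runner subprocess and waits for it to exit before returning.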
func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

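// EstimatedVRAMByGPU returns the VRAM estimated for a specific GPU, or 0 if
// the GPU ID is not part of this server's layout.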
func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			return s.estimate.GPUSizes[i]
		}
	}
	return 0
}

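// parseDurationMs converts a millisecond count reported by the runner into a
// time.Duration.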
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}