package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration   // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted // limits concurrent requests to the runner's parallel slots (numParallel)
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs.
// The GPU list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemTotalMemory uint64
	var systemFreeMemory uint64

	systemMemInfo, err := gpu.GetCPUMem()
	if err != nil {
		slog.Error("failed to lookup system memory", "error", err)
	} else {
		systemTotalMemory = systemMemInfo.TotalMemory
		systemFreeMemory = systemMemInfo.FreeMemory
		slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory))
	}

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system RAM info
	if opts.NumGPU == 0 {
		gpus = gpu.GetCPUInfo()
	}
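	// With a CPU-only device list we can pick the CPU runner outright; otherwise
	// estimate layer offload and fall back to CPU if nothing fits in VRAM.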
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = serverForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpus = gpu.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	estimate.log()

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

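	// availableServers maps runner variants (e.g. cpu, cuda, rocm) to their
	// on-disk payload directories; if the payloads were cleaned up, re-extract
	// them before continuing.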
	availableServers := getAvailableServers()
	if len(availableServers) == 0 {
		if runtime.GOOS != "windows" {
			slog.Warn("llama server binary disappeared, reinitializing payloads")
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
			availableServers = getAvailableServers()
		} else {
			return nil, finalErr
		}
	}
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	flashAttnEnabled := envconfig.FlashAttention

	for _, g := range gpus {
		// only cuda (compute capability 7+) and metal support flash attention
		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
			flashAttnEnabled = false
		}

		// mmap has issues with partial offloading on metal
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = api.TriStateFalse
		}
	}

	if flashAttnEnabled {
		params = append(params, "--flash-attn")
	}

	// Windows CUDA should not use mmap for best performance
	// On Linux, with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == api.TriStateUndefined) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == api.TriStateUndefined) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == api.TriStateUndefined) ||
		opts.UseMMap == api.TriStateFalse {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	for i := range len(servers) {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			gpus = gpu.GetCPUInfo()
		}

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed ", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH and the parent dir for common dependencies
		libraryPaths := []string{dir, filepath.Dir(dir)}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append the existing path entries after our runner directory
			// This favors our bundled library dependencies over system libraries
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

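		// llmServer tracks everything needed to manage the subprocess: the
		// chosen port, the exec.Cmd, and a buffered done channel that the
		// reaper goroutine below writes to when the process exits.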
		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			estimate:    estimate,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

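		// Collect any GPU-specific environment workarounds recorded during
		// discovery so they can be applied to the subprocess environment below.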
		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server: %w. %s may have noexec set. Set OLLAMA_TMPDIR to a writable, executable directory for the server", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			s.done <- s.cmd.Wait()
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

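// projectorMemoryRequirements returns the total tensor size of a multimodal
// projector file, or 0 if the file cannot be read or decoded.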
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file, 0)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's already exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

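// Ping checks that the runner subprocess is alive and its health endpoint responds.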
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

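// WaitUntilRunning blocks until the runner reports ready, returning an error if
// the load stalls, the subprocess exits, or ctx is cancelled.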
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := 5 * time.Minute            // If no progress happens
	finalLoadDuration := 5 * time.Minute        // After we hit 100%, give the runner more time to come online
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			if strings.Contains(msg, "unknown model") {
				return fmt.Errorf("this model is not supported by your version of Ollama. You may need to upgrade")
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
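		// Poll the health endpoint with a short per-request timeout so a hung
		// runner can't block the wait loop.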
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(finalLoadDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

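// jsonGrammar is the grammar handed to the runner when the request asks for
// JSON output; it constrains sampling to syntactically valid JSON.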
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

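// Completion streams a prediction from the runner's /completion endpoint,
// calling fn for each content chunk and once more with the final timings.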
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow maximum 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

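
	// Build the llama.cpp /completion request body from the caller's options.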
	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated; this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

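// Embedding requests an embedding vector for prompt from the runner's
// /embedding endpoint.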
func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

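// Tokenize converts content into token ids using the runner's /tokenize endpoint.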
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

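// Detokenize converts token ids back into text using the runner's /detokenize endpoint.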
func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

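// Close kills the runner subprocess and waits for it to exit.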
func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			return s.estimate.GPUSizes[i]
		}
	}
	return 0
}

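
// parseDurationMs converts a millisecond value reported by the runner into a
// time.Duration.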
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}