package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)

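// LlamaServer wraps a llama.cpp runner subprocess and exposes the
// operations the rest of Ollama needs from it.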
type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration   // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

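// LoadModel reads a model file from disk and decodes its GGML metadata.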
func LoadModel(model string) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
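//
// A minimal usage sketch (hypothetical caller; assumes gpus and opts were
// resolved elsewhere):
//
//	ggml, err := LoadModel(model)
//	if err != nil { /* handle */ }
//	srv, err := NewLlamaServer(gpus, model, ggml, nil, nil, opts)
//	if err != nil { /* handle */ }
//	if err := srv.WaitUntilRunning(ctx); err != nil { /* handle */ }
//	defer srv.Close()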
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemMemory uint64

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = gpu.GetCPUInfo()
	}
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = serverForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		if gpus[0].Library == "metal" {
			memInfo, err := gpu.GetCPUMem()
			if err != nil {
				slog.Error("failed to lookup system memory", "error", err)
			} else {
				systemMemory = memInfo.TotalMemory
				slog.Debug("system memory", "total", format.HumanBytes2(systemMemory))
			}
		}
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpus = gpu.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	flashAttnEnabled := envconfig.FlashAttention

	for _, g := range gpus {
		// only cuda (compute capability 7+) and metal support flash attention
		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
			flashAttnEnabled = false
		}

		// mmap has issues with partial offloading on metal
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = api.TriStateFalse
		}
	}

	if flashAttnEnabled {
		params = append(params, "--flash-attn")
	}

	// Windows CUDA should not use mmap for best performance
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda") || opts.UseMMap == api.TriStateFalse {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	numParallel := envconfig.NumParallel

	// TODO (jmorganca): multimodal models don't support parallel yet
	// see https://github.com/ollama/ollama/issues/4165
	if len(projectors) > 0 {
		numParallel = 1
		slog.Warn("multimodal models don't support parallel requests yet")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	for i := range len(servers) {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			gpus = gpu.GetCPUInfo()
		}

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed ", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH
		libraryPaths := []string{dir}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			estimate:    estimate,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server: %w. %s may have noexec set. Set OLLAMA_TMPDIR to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			s.done <- s.cmd.Wait()
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

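// projectorMemoryRequirements approximates the memory a projector file needs
// by summing its tensor sizes; it returns 0 if the file cannot be decoded.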
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

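// ServerStatusResp mirrors the JSON body returned by the runner's /health
// endpoint.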
type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

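// getServerStatus performs a single probe of the runner's /health endpoint,
// failing fast if the subprocess has already exited.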
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

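// Ping performs a single health check against the runner.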
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

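// WaitUntilRunning blocks until the runner reports ready, resetting its stall
// timer whenever load progress advances, and returns an error if the context
// is canceled, the subprocess exits, or the load stalls for too long.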
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := 5 * time.Minute            // If no progress happens
	finalLoadDuration := 5 * time.Minute        // After we hit 100%, give the runner more time to come online
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(finalLoadDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

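// Completion streams prediction output from the runner's /completion
// endpoint to fn, gated by the semaphore that bounds parallel requests.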
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow maximum 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

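// Embedding requests an embedding vector for prompt from the runner's
// /embedding endpoint.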
func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

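// Tokenize converts content into token IDs using the runner's /tokenize
// endpoint.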
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

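// Detokenize converts token IDs back into text using the runner's
// /detokenize endpoint.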
func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

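// Close kills the runner subprocess and waits for it to exit.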
func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			return s.estimate.GPUSizes[i]
		}
	}
	return 0
}

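// parseDurationMs converts a millisecond count from the llama.cpp timings
// into a time.Duration.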
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}