"torchvision/csrc/io/image/cpu/readjpeg_cpu.cpp" did not exist on "74de51d6d478e289135d9274e6af550a9bfba137"
server.go 26.9 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
	"github.com/ollama/ollama/server/envconfig"
)

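// LlamaServer is the interface the rest of the system uses to drive a
// running llama.cpp runner subprocess.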
type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64
	EstimatedTotal() uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	// TODO - this should be broken down by GPU
	estimatedVRAM  uint64 // Estimated usage of VRAM by the loaded model
	estimatedTotal uint64 // Total size of model
	totalLayers    uint64
	gpuCount       int
	loadDuration   time.Duration // Record how long it took the model to load

	sem *semaphore.Weighted
}

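// LoadModel opens the model file at the given path and decodes its GGML metadata.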
func LoadModel(model string) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
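//
// A typical lifecycle, as a sketch (the real call sites live in the scheduler):
//
//	server, err := NewLlamaServer(gpus, modelPath, ggml, nil, nil, opts)
//	if err != nil {
//		return err
//	}
//	defer server.Close()
//	if err := server.WaitUntilRunning(ctx); err != nil {
//		return err
//	}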
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimatedVRAM uint64
	var estimatedTotal uint64
	var systemMemory uint64
	gpuCount := len(gpus)
	if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {

		// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner

		cpuRunner = serverForCpu()
		gpuCount = 0
	} else {
		if gpus[0].Library == "metal" {
			memInfo, err := gpu.GetCPUMem()
			if err != nil {
				slog.Error("failed to lookup system memory", "error", err)
			} else {
				systemMemory = memInfo.TotalMemory
				slog.Debug("system memory", "total", format.HumanBytes2(systemMemory))
			}
		}
		var layers int
		layers, estimatedVRAM, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)

		if gpus[0].Library == "metal" && estimatedVRAM > systemMemory {
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		} else if gpus[0].Library != "metal" && layers == 0 {
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpuCount = 0
		} else if opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu" {
			opts.NumGPU = layers
		}
	}

	// Loop through potential servers
	finalErr := fmt.Errorf("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	numParallel := envconfig.NumParallel

	// TODO (jmorganca): multimodal models don't support parallel yet
	// see https://github.com/ollama/ollama/issues/4165
	if len(projectors) > 0 {
		numParallel = 1
		slog.Warn("multimodal models don't support parallel requests yet")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

	for i := 0; i < len(servers); i++ {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			// TODO if we tried a gpu runner first, and it failed, record the error and bubble that back up
			gpuCount = 0
		}

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed ", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH
		libraryPaths := []string{dir}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append the system's existing path entries after our runner
			// directory so our bundled library dependencies are found first
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server = server + ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:           port,
			cmd:            exec.Command(server, finalParams...),
			status:         NewStatusWriter(os.Stderr),
			options:        opts,
			estimatedVRAM:  estimatedVRAM,
			estimatedTotal: estimatedTotal,
			sem:            semaphore.NewWeighted(int64(numParallel)),
			totalLayers:    ggml.KV().BlockCount() + 1,
			gpuCount:       gpuCount,
			done:           make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		visibleDevicesEnv, visibleDevicesEnvVal := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server: %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			s.done <- s.cmd.Wait()
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

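// projectorMemoryRequirements estimates the memory needed for a multimodal
// projector by summing the sizes of its tensor layers; it returns 0 if the
// file cannot be read or decoded.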
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

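// ServerStatus describes the runner states reported by the llama.cpp /health endpoint.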
type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string `json:"status"`
	SlotsIdle       int    `json:"slots_idle"`
	SlotsProcessing int    `json:"slots_processing"`
	Error           string `json:"error"`
}

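// getServerStatus queries the runner's /health endpoint once and maps the
// JSON reply (or a dead subprocess) onto a ServerStatus.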
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's already exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, fmt.Errorf("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

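// Ping performs a single health check against the runner, returning an error
// if it is unhealthy.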
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

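// WaitUntilRunning blocks until the runner reports ready, failing if the
// context is cancelled, the subprocess exits, or the load timeout elapses.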
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1

	for {
		select {
		case <-ctx.Done():
			slog.Info("context expired before server started")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		default:
		}
		if time.Now().After(expiresAt) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		// Probe with a fresh short timeout each pass, cancelling immediately
		// so deferred cancels don't accumulate for the life of the loop
		probeCtx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		status, _ := s.getServerStatus(probeCtx)
		cancel()
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

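// jsonGrammar is a GBNF grammar passed to the llama.cpp server to constrain
// sampling to syntactically valid JSON when a request asks for format "json".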
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

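// completion mirrors a single streamed event from the llama.cpp /completion endpoint.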
type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

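// Completion streams a prediction from the runner, calling fn once per
// generated chunk and a final time with Done set. A minimal caller, as a sketch:
//
//	var sb strings.Builder
//	err := s.Completion(ctx, CompletionRequest{Prompt: prompt, Options: opts}, func(r CompletionResponse) {
//		sb.WriteString(r.Content)
//	})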
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow maximum 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should response in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

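// Embedding requests an embedding vector for the given prompt from the
// runner's /embedding endpoint.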
func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

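// Tokenize converts content into model token ids via the runner's /tokenize endpoint.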
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

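// Detokenize converts token ids back into text via the runner's /detokenize endpoint.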
func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal encode response: %w", err)
	}

	return decoded.Content, nil
}

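// Close kills the runner subprocess, if any, and waits for it to exit.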
func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimatedVRAM
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimatedTotal
}

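// parseDurationMs converts a millisecond count, as reported by llama.cpp
// timings, into a time.Duration.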
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}