package ollamarunner

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"image"
	"log"
	"log/slog"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"
	"unicode/utf8"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/runner/common"
	"github.com/ollama/ollama/sample"

	_ "github.com/ollama/ollama/model/models"
)

// input is an element of the prompt to process, either a token or an image
type input struct {
	token int32

	image image.Image
}

type Sequence struct {
	// batch index
	iBatch int

	// prompt inputs left to evaluate
	inputs []input

	// inputs that have been added to a batch but not yet submitted to Forward
	pendingInputs []input

	// tokens that have been generated but not returned yet (e.g. for stop sequences)
	pendingResponses []string

	// input cache being used by this sequence
	cache *InputCacheSlot

	// channel to send responses over
	responses chan string

	// channel to stop decoding (such as if the remote connection is closed)
	quit chan bool

	// number of tokens to predict
	numPredict int

	// set of samplers to run on generated logits
	samplers []sample.Sampler

	// channel to send back the embedding if embedding only
	embedding chan []float32

	// stop sequences
	stop []string

	// number of inputs to keep at the beginning when shifting context window
	numKeep int32

	// true if an embedding is to be returned instead of text generation
	embeddingOnly bool

	doneReason string

	// Metrics
	startProcessingTime time.Time
	startGenerationTime time.Time
	numPredicted        int
	numPromptInputs     int
}

type NewSequenceParams struct {
	numPredict int
	stop       []string
	numKeep    int32
	samplers   []sample.Sampler
	embedding  bool
}

func (s *Server) NewSequence(prompt string, images []ImageData, params NewSequenceParams) (*Sequence, error) {
	s.ready.Wait()

	startTime := time.Now()

	inputs, err := s.inputs(prompt, images)
	if err != nil {
		return nil, fmt.Errorf("failed to process inputs: %w", err)
	} else if len(inputs) == 0 {
		return nil, errors.New("no input provided")
	}

	if params.numKeep < 0 {
		params.numKeep = int32(len(inputs))
	}

	// Ensure that at least 1 input can be discarded during shift
	params.numKeep = min(params.numKeep, s.cache.numCtx-1)

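	// If the prompt still does not fit in the context window, keep the first
	// numKeep inputs and drop just enough immediately after them that the rest
	// fits. For example (illustrative numbers, not from the source): with
	// numCtx=8, numKeep=2 and 10 inputs, discard=2 and the result is
	// inputs[0:2] followed by inputs[4:].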
	if int32(len(inputs)) > s.cache.numCtx {
		discard := int32(len(inputs)) - s.cache.numCtx
		newInputs := inputs[:params.numKeep]
		newInputs = append(newInputs, inputs[params.numKeep+discard:]...)

		slog.Warn("truncating input prompt", "limit", s.cache.numCtx, "prompt", len(inputs), "keep", params.numKeep, "new", len(newInputs))
		inputs = newInputs
	}

	// TODO(jessegross): Ingest cached history for grammar

	return &Sequence{
		inputs:              inputs,
		numPromptInputs:     len(inputs),
		startProcessingTime: startTime,
		numPredict:          params.numPredict,
		pendingResponses:    make([]string, 0),
		responses:           make(chan string, 100),
		quit:                make(chan bool, 1),
		embedding:           make(chan []float32, 1),
		samplers:            params.samplers,
		embeddingOnly:       params.embedding,
		stop:                params.stop,
		numKeep:             params.numKeep,
	}, nil
}

// inputs processes the prompt and images into a list of inputs
// by splitting the prompt on [img-<n>] tags, tokenizing text and
// decoding images
func (s *Server) inputs(prompt string, images []ImageData) ([]input, error) {
	var inputs []input
	var parts []string
	var matches [][]string

	// TODO(jessegross): This can sometimes trigger for matching text in the
	// user's prompt. We previously tried to avoid it by only looking for images
	// on image models. We don't have a clear indication now but it would be better
	// to properly escape it in any case.
	re := regexp.MustCompile(`\[img-(\d+)\]`)
	parts = re.Split(prompt, -1)
	matches = re.FindAllStringSubmatch(prompt, -1)
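	// For example (illustrative, not from the source): the prompt
	// "describe [img-0] briefly" yields parts ["describe ", " briefly"] and
	// matches [["[img-0]", "0"]]; each part is tokenized and the image
	// referenced by the following tag, if any, is inserted after it.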

	for i, part := range parts {
		// text - tokenize
		tokens, err := s.model.(model.TextProcessor).Encode(part)
		if err != nil {
			return nil, err
		}

		for _, t := range tokens {
			inputs = append(inputs, input{token: t})
		}

		// image - decode and store
		if i < len(matches) {
			n, _ := strconv.Atoi(matches[i][1])

			imageIndex := -1
			for j := range images {
				if images[j].ID == n {
					imageIndex = j
					break
				}
			}

			if imageIndex < 0 {
				return nil, fmt.Errorf("invalid image index: %d", n)
			}

			image, _, err := image.Decode(bytes.NewReader(images[imageIndex].Data))
			if err != nil {
				return nil, err
			}

			inputs = append(inputs, input{image: image})
		}
	}

	return inputs, nil
}

type Server struct {
	// is the server ready to process requests?
	// protects access to model and image
	ready sync.WaitGroup

	// loaded model
	model model.Model

	// status for external health reporting - loading, ready to serve, etc.
	status ServerStatus

	// current progress on loading the model
	progress float32

	// number of simultaneous requests to handle
	parallel int

	// maximum number of elements in a batch (per sequence)
	// TODO (jmorganca): make this n_batch
	batchSize int

	// protects access to everything below this line
	// this is context state needed for decoding
	mu sync.Mutex

	// indicates that data is ready for processing
	cond *sync.Cond

	// the list of simultaneous sequences being evaluated
	seqs []*Sequence

	// seqs can have a maximum of parallel entries, which
	// is enforced by seqsSem
	seqsSem *semaphore.Weighted

	// KV cache
	cache *InputCache

	// next sequence for prompt processing to avoid starvation
	nextSeq int
}

func (s *Server) allNil() bool {
	for _, item := range s.seqs {
		if item != nil {
			return false
		}
	}
	return true
}

func flushPending(seq *Sequence) bool {
	joined := strings.Join(seq.pendingResponses, "")
	seq.pendingResponses = []string{}

	// Check if there are any partial UTF-8 characters remaining.
	// We already check and queue as we are generating but some may
	// still make it here:
	// - Sequence is ending, e.g. generation limit has been hit
	// - Invalid characters in the middle of a string
	// This is a stricter check to ensure we never output invalid Unicode.
	for !utf8.ValidString(joined) {
		joined = joined[:len(joined)-1]
	}

	if len(joined) == 0 {
		return true
	}

	select {
	case seq.responses <- joined:
		return true
	case <-seq.quit:
		return false
	}
}

func (s *Server) removeSequence(seqIndex int, reason string) {
	seq := s.seqs[seqIndex]

	flushPending(seq)
	seq.doneReason = reason
	close(seq.responses)
	close(seq.embedding)
	seq.cache.InUse = false
	s.seqs[seqIndex] = nil
	s.seqsSem.Release(1)
}

func (s *Server) run(ctx context.Context) {
	s.ready.Wait()

	for {
		select {
		case <-ctx.Done():
			return
		default:
			err := s.processBatch()
			if err != nil {
				panic(err)
			}
		}
	}
}

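// processBatch assembles a batch of pending inputs across all active
// sequences (visiting them round-robin starting at nextSeq), runs a single
// forward pass of the model, and samples the next token for every sequence
// that has finished processing its prompt. It blocks until at least one
// sequence is waiting to be processed.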
func (s *Server) processBatch() error {
	s.mu.Lock()
	for s.allNil() {
		s.cond.Wait() // Wait until an item is added
	}
	defer s.mu.Unlock()

	var options model.Options
	imgSeq := -1

	seqIdx := s.nextSeq - 1
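	// Start from nextSeq so a sequence that was cut short in the previous
	// batch (for example by another sequence's pending image input) is
	// serviced first.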
	for range s.seqs {
		seqIdx = (seqIdx + 1) % len(s.seqs)
		seq := s.seqs[seqIdx]

		if seq == nil {
			continue
		}

		// if past the num predict limit
		if seq.numPredict > 0 && seq.numPredicted >= seq.numPredict {
			s.removeSequence(seqIdx, "limit")
			continue
		}

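		// Without a usable KV cache nothing persists between forward passes,
		// so re-queue the inputs that were already processed and resubmit the
		// whole prompt each time.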
		if !s.cache.enabled {
			seq.inputs = append(seq.cache.Inputs, seq.inputs...)
			seq.cache.Inputs = []input{}
		}

		for i, input := range seq.inputs {
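			// If the next input would overflow the context window, shift the
			// cache to free space - but only if nothing from this batch is
			// pending yet; otherwise stop filling this sequence and pick it
			// up again in the next batch.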
			if int32(len(seq.cache.Inputs)+len(seq.pendingInputs)+1) > s.cache.numCtx {
				if len(seq.pendingInputs) == 0 {
					err := s.cache.ShiftCacheSlot(seq.cache, seq.numKeep)
					if err != nil {
						return err
					}
				} else {
					break
				}
			}

			if i >= s.batchSize {
				break
			}

			// TODO(jessegross): Image inputs need to be rethought - it doesn't
			// work well for different types of models or multiple sequences
			if input.image != nil {
				if len(seq.pendingInputs) != len(options.Images) {
					break
				}

				if imgSeq != seqIdx && imgSeq != -1 {
					s.nextSeq = seqIdx
					break
				}

				imgSeq = seqIdx
				options.Images = append(options.Images, input.image)
				seq.pendingInputs = append(seq.pendingInputs, input)
				continue
			}

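			// Queue the token into the batch along with its position and the
			// cache slot it belongs to. A row of logits is only requested for
			// the last of this sequence's inputs; iBatch records where that
			// row will land in the batch outputs.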
			options.Inputs = append(options.Inputs, input.token)
			options.Positions = append(options.Positions, int32(len(seq.cache.Inputs)+len(seq.pendingInputs)))
			options.Sequences = append(options.Sequences, seq.cache.Id)

			seq.iBatch = len(options.Outputs)
			if i+1 == len(seq.inputs) {
				options.Outputs = append(options.Outputs, int32(len(options.Inputs)-1))
			}
			seq.pendingInputs = append(seq.pendingInputs, input)
		}

		seq.inputs = seq.inputs[len(seq.pendingInputs):]
	}

	if len(options.Inputs) == 0 {
		return nil
	}

	ctx := s.model.Backend().NewContext()
	defer ctx.Close()

	modelOutput, err := model.Forward(ctx, s.model, options)
	if err != nil {
		return fmt.Errorf("failed to decode batch: %w", err)
	}

	f32s := modelOutput.Floats()

	// TODO(jessegross): This will no longer be necessary once the sampling interface takes f32s
	logits := make([]float64, len(f32s))
	for i, f32 := range f32s {
		logits[i] = float64(f32)
	}

	for i, seq := range s.seqs {
		if seq == nil {
			continue
		}

		// After calling Forward, pending inputs are now in the cache
		if len(seq.pendingInputs) > 0 {
			seq.cache.Inputs = append(seq.cache.Inputs, seq.pendingInputs...)
			seq.pendingInputs = []input{}
		}

		// don't sample prompt processing
		if len(seq.inputs) != 0 {
			if !s.cache.enabled {
				return errors.New("caching disabled but unable to fit entire input in a batch")
			}
			continue
		}

		seq.numPredicted++
		if seq.numPredicted == 1 {
			seq.startGenerationTime = time.Now()
		}

		// if done processing the prompt, generate an embedding and return
		if seq.embeddingOnly {
			// TODO(jessegross): Embedding support
			s.removeSequence(i, "")
			continue
		}

		// sample a token
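		// Each requested output produced one vocab-sized row of logits;
		// iBatch selects this sequence's row before sampling.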
		vocabSize := len(f32s) / len(options.Outputs)
		tokens, err := sample.Sample(logits[seq.iBatch*vocabSize:(seq.iBatch+1)*vocabSize], seq.samplers...)
		if err != nil {
			return err
		}

		// TODO(jessegross): Sampler will output a single int32 in the future
		token := int32(tokens[0])

		// if it's an end of sequence token, break
		if s.model.(model.TextProcessor).Is(token, model.SpecialEOS) {
			// TODO (jmorganca): we should send this back
			// as it's important for the /api/generate context
			// seq.responses <- piece

			s.removeSequence(i, "stop")
			continue
		}

		piece, err := s.model.(model.TextProcessor).Decode([]int32{token})
		if err != nil {
			return err
		}

		seq.inputs = []input{{token: token}}

		seq.pendingResponses = append(seq.pendingResponses, piece)
		sequence := strings.Join(seq.pendingResponses, "")

		if ok, stop := common.FindStop(sequence, seq.stop); ok {
			slog.Debug("hit stop token", "pending", seq.pendingResponses, "stop", stop)

			var tokenTruncated bool
			origLen := len(seq.pendingResponses)
			seq.pendingResponses, tokenTruncated = common.TruncateStop(seq.pendingResponses, stop)
			newLen := len(seq.pendingResponses)

			// Update the cache based on the tokens that will be returned:
			// - We have 1 token more than is currently in the cache because
			// the last one generated wasn't submitted to Forward
			// - Remove any stop sequences that we stripped out
			// - If TruncateStop removed a portion of a token, drop that
			// - As defense-in-depth, if TruncateStop didn't find a stop token
			// remove the extra one that we added to the cache len
			tokenLen := len(seq.cache.Inputs) + 1
			tokenLen -= origLen - newLen
			if tokenTruncated || origLen == newLen {
				tokenLen--
			}
			seq.cache.Inputs = seq.cache.Inputs[:tokenLen]
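			// For example (hypothetical numbers): with 10 inputs in the cache,
			// 3 pending pieces before TruncateStop and 1 after it (with a
			// token split in the middle), tokenLen is 10 + 1 - 2 - 1 = 8.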

			s.removeSequence(i, "stop")
			continue
		}

		if common.ContainsStopSuffix(sequence, seq.stop) {
			continue
		}

		if common.IncompleteUnicode(sequence) {
			continue
		}

		if !flushPending(seq) {
			s.removeSequence(i, "connection")
		}
	}

	return nil
}

// TODO (jmorganca): use structs from the api package to avoid duplication
// this way the api acts as a proxy instead of using a different api for the
// runner
type Options struct {
	api.Runner

	NumKeep          int      `json:"n_keep"`
	Seed             int      `json:"seed"`
	NumPredict       int      `json:"n_predict"`
	TopK             int      `json:"top_k"`
	TopP             float32  `json:"top_p"`
	MinP             float32  `json:"min_p"`
	TypicalP         float32  `json:"typical_p"`
	RepeatLastN      int      `json:"repeat_last_n"`
	Temperature      float32  `json:"temperature"`
	RepeatPenalty    float32  `json:"repeat_penalty"`
	PresencePenalty  float32  `json:"presence_penalty"`
	FrequencyPenalty float32  `json:"frequency_penalty"`
	Mirostat         int      `json:"mirostat"`
	MirostatTau      float32  `json:"mirostat_tau"`
	MirostatEta      float32  `json:"mirostat_eta"`
	Stop             []string `json:"stop"`
}

type ImageData struct {
	Data          []byte `json:"data"`
	ID            int    `json:"id"`
	AspectRatioID int    `json:"aspect_ratio_id"`
}

type CompletionRequest struct {
	Prompt      string      `json:"prompt"`
	Images      []ImageData `json:"image_data"`
	Grammar     string      `json:"grammar"`
	CachePrompt bool        `json:"cache_prompt"`

	Options
}

type Timings struct {
	PredictedN  int     `json:"predicted_n"`
	PredictedMS float64 `json:"predicted_ms"`
	PromptN     int     `json:"prompt_n"`
	PromptMS    float64 `json:"prompt_ms"`
}

type CompletionResponse struct {
	Content string `json:"content"`
	Stop    bool   `json:"stop"`

	Model        string  `json:"model,omitempty"`
	Prompt       string  `json:"prompt,omitempty"`
	StoppedLimit bool    `json:"stopped_limit,omitempty"`
	PredictedN   int     `json:"predicted_n,omitempty"`
	PredictedMS  float64 `json:"predicted_ms,omitempty"`
	PromptN      int     `json:"prompt_n,omitempty"`
	PromptMS     float64 `json:"prompt_ms,omitempty"`

	Timings Timings `json:"timings"`
}

func getSamplers(_ CompletionRequest) []sample.Sampler {
	// TODO(jessegross): Waiting for sampling code

	/*samplingParams.TopK = req.TopK
	samplingParams.TopP = req.TopP
	samplingParams.MinP = req.MinP
	samplingParams.TypicalP = req.TypicalP
	samplingParams.Temp = req.Temperature
	samplingParams.RepeatLastN = req.RepeatLastN
	samplingParams.PenaltyRepeat = req.RepeatPenalty
	samplingParams.PenaltyFreq = req.FrequencyPenalty
	samplingParams.PenaltyPresent = req.PresencePenalty
	samplingParams.Mirostat = req.Mirostat
	samplingParams.MirostatTau = req.MirostatTau
	samplingParams.MirostatEta = req.MirostatEta
	samplingParams.Seed = uint32(req.Seed)
	samplingParams.Grammar = req.Grammar*/

	return []sample.Sampler{sample.Greedy()}
}

func (s *Server) completion(w http.ResponseWriter, r *http.Request) {
	var req CompletionRequest
	req.Options = Options(api.DefaultOptions())
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Bad request", http.StatusBadRequest)
		return
	}

	// Set the headers to indicate streaming
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Transfer-Encoding", "chunked")

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming not supported", http.StatusInternalServerError)
		return
	}

	seq, err := s.NewSequence(req.Prompt, req.Images, NewSequenceParams{
		numPredict: req.NumPredict,
		stop:       req.Stop,
		numKeep:    int32(req.NumKeep),
		samplers:   getSamplers(req),
		embedding:  false,
	})
	if err != nil {
		http.Error(w, fmt.Sprintf("Failed to create new sequence: %v", err), http.StatusInternalServerError)
		return
	}

	// Ensure there is a place to put the sequence, released when removed from s.seqs
	if err := s.seqsSem.Acquire(r.Context(), 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return
	}

	s.mu.Lock()
	found := false
	for i, sq := range s.seqs {
		if sq == nil {
			seq.cache, seq.inputs, err = s.cache.LoadCacheSlot(seq.inputs, req.CachePrompt)
			if err != nil {
				s.mu.Unlock()
				http.Error(w, fmt.Sprintf("Failed to load cache: %v", err), http.StatusInternalServerError)
				return
			}

			s.seqs[i] = seq
			s.cond.Signal()
			found = true
			break
		}
	}
	s.mu.Unlock()

	if !found {
		http.Error(w, "could not find an available sequence", http.StatusInternalServerError)
		return
	}

	for {
		select {
		case <-r.Context().Done():
			close(seq.quit)
			return
		case content, ok := <-seq.responses:
			if ok {
				if err := json.NewEncoder(w).Encode(&CompletionResponse{
					Content: content,
				}); err != nil {
					http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError)
					close(seq.quit)
					return
				}

				flusher.Flush()
			} else {
				// Send the final response
				if err := json.NewEncoder(w).Encode(&CompletionResponse{
					Stop:         true,
					StoppedLimit: seq.doneReason == "limit",
					Timings: Timings{
						PromptN:     seq.numPromptInputs,
						PromptMS:    float64(seq.startGenerationTime.Sub(seq.startProcessingTime).Milliseconds()),
						PredictedN:  seq.numPredicted,
						PredictedMS: float64(time.Since(seq.startGenerationTime).Milliseconds()),
					},
				}); err != nil {
					http.Error(w, fmt.Sprintf("failed to encode final response: %v", err), http.StatusInternalServerError)
				}

				return
			}
		}
	}
}

type EmbeddingRequest struct {
	Content     string `json:"content"`
	CachePrompt bool   `json:"cache_prompt"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

func (s *Server) embeddings(w http.ResponseWriter, r *http.Request) {
	var req EmbeddingRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, fmt.Sprintf("bad request: %s", err), http.StatusBadRequest)
		return
	}

	w.Header().Set("Content-Type", "application/json")

	slog.Debug("embedding request", "content", req.Content)

	seq, err := s.NewSequence(req.Content, nil, NewSequenceParams{embedding: true})
	if err != nil {
		http.Error(w, fmt.Sprintf("Failed to create new sequence: %v", err), http.StatusInternalServerError)
		return
	}

	// Ensure there is a place to put the sequence, released when removed from s.seqs
	if err := s.seqsSem.Acquire(r.Context(), 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embeddings request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return
	}

	s.mu.Lock()
	found := false
	for i, sq := range s.seqs {
		if sq == nil {
			seq.cache, seq.inputs, err = s.cache.LoadCacheSlot(seq.inputs, req.CachePrompt)
			if err != nil {
				s.mu.Unlock()
				http.Error(w, fmt.Sprintf("Failed to load cache: %v", err), http.StatusInternalServerError)
				return
			}
			s.seqs[i] = seq
			s.cond.Signal()
			found = true
			break
		}
	}
	s.mu.Unlock()

	if !found {
		http.Error(w, "could not find an available sequence", http.StatusInternalServerError)
		return
	}

	embedding := <-seq.embedding

	if err := json.NewEncoder(w).Encode(&EmbeddingResponse{
		Embedding: embedding,
	}); err != nil {
		http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError)
	}
}

type HealthResponse struct {
	Status   string  `json:"status"`
	Progress float32 `json:"progress"`
}

type ServerStatus int

const (
	ServerStatusReady ServerStatus = iota
	ServerStatusLoadingModel
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "ok"
	case ServerStatusLoadingModel:
		return "loading model"
	default:
		return "server error"
	}
}

func (s *Server) health(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(&HealthResponse{
		Status:   s.status.ToString(),
		Progress: s.progress,
	}); err != nil {
		http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError)
	}
}

type multiLPath []string

func (m *multiLPath) Set(value string) error {
	*m = append(*m, value)
	return nil
}

func (m *multiLPath) String() string {
	return strings.Join(*m, ", ")
}

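// loadModel loads the model and sets up the input cache, then marks the
// server ready. It is started in a goroutine from Execute and panics on
// unrecoverable errors.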
func (s *Server) loadModel(
	mpath string,
	lpath multiLPath,
	parallel int,
	kvCacheType string,
	kvSize int,
	multiUserCache bool,
) {
	var err error
	s.model, err = model.New(mpath)
	if err != nil {
		panic(err)
	}

	// TODO(jessegross): LoRA loading
	if lpath.String() != "" {
		panic("loras are not yet implemented")
	}

	s.cache, err = NewInputCache(s.model, kvCacheType, int32(kvSize), parallel, multiUserCache)
	if err != nil {
		panic(err)
	}

	if !s.cache.enabled && parallel > 1 {
		parallel = 1
		slog.Warn("model does not support caching, disabling parallel processing")
	}

	s.parallel = parallel
	s.seqs = make([]*Sequence, s.parallel)
	s.seqsSem = semaphore.NewWeighted(int64(s.parallel))

	s.status = ServerStatusReady
	s.ready.Done()
}

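// Execute is the entry point for the runner: it parses the runner flags,
// starts loading the model in the background, and serves the /completion,
// /embedding and /health endpoints on the configured port.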
func Execute(args []string) error {
	fs := flag.NewFlagSet("runner", flag.ExitOnError)
	mpath := fs.String("model", "", "Path to model binary file")
	parallel := fs.Int("parallel", 1, "Number of sequences to handle simultaneously")
	batchSize := fs.Int("batch-size", 512, "Batch size")
	_ = fs.Int("n-gpu-layers", 0, "Number of layers to offload to GPU")
	_ = fs.Int("main-gpu", 0, "Main GPU")
	_ = fs.Bool("flash-attn", false, "Enable flash attention")
	kvSize := fs.Int("ctx-size", 2048, "Context (or KV cache) size")
	kvCacheType := fs.String("kv-cache-type", "", "quantization type for KV cache (default: f16)")
	port := fs.Int("port", 8080, "Port to expose the server on")
	_ = fs.Int("threads", runtime.NumCPU(), "Number of threads to use during generation")
	verbose := fs.Bool("verbose", false, "verbose output (default: disabled)")
	_ = fs.Bool("no-mmap", false, "do not memory-map model (slower load but may reduce pageouts if not using mlock)")
	_ = fs.Bool("mlock", false, "force system to keep model in RAM rather than swapping or compressing")
	_ = fs.String("tensor-split", "", "fraction of the model to offload to each GPU, comma-separated list of proportions")
	multiUserCache := fs.Bool("multiuser-cache", false, "optimize input cache algorithm for multiple users")

	var lpaths multiLPath
	fs.Var(&lpaths, "lora", "Path to lora layer file (can be specified multiple times)")

	fs.Usage = func() {
		fmt.Fprintf(fs.Output(), "Runner usage\n")
		fs.PrintDefaults()
	}
	if err := fs.Parse(args); err != nil {
		return err
	}
	level := slog.LevelInfo
	if *verbose {
		level = slog.LevelDebug
	}
	handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level:     level,
		AddSource: true,
		ReplaceAttr: func(_ []string, attr slog.Attr) slog.Attr {
			if attr.Key == slog.SourceKey {
				source := attr.Value.Any().(*slog.Source)
				source.File = filepath.Base(source.File)
			}
			return attr
		},
	})
	slog.SetDefault(slog.New(handler))
	slog.Info("starting ollama engine")
	// TODO(jessegross): Some system info would be useful

	server := &Server{
		batchSize: *batchSize,
		status:    ServerStatusLoadingModel,
	}

	// TODO(jessegross): Parameters that need to be implemented:
	//	n-gpu-layers
	//	main-gpu
	//	flash-attn
	//	threads
	//	no-mmap
	//	mlock
	//	tensor-split

	/*var tensorSplitFloats []float32
	if *tensorSplit != "" {
		stringFloats := regexp.MustCompile(",").Split(*tensorSplit, -1)

		tensorSplitFloats = make([]float32, 0, len(stringFloats))
		for _, s := range stringFloats {
			f, _ := strconv.ParseFloat(s, 32)
			tensorSplitFloats = append(tensorSplitFloats, float32(f))
		}
	}*/

	server.ready.Add(1)
	go server.loadModel(*mpath, lpaths, *parallel, *kvCacheType, *kvSize, *multiUserCache)

	server.cond = sync.NewCond(&server.mu)

	ctx, cancel := context.WithCancel(context.Background())
	go server.run(ctx)

	addr := "127.0.0.1:" + strconv.Itoa(*port)
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		fmt.Println("Listen error:", err)
		cancel()
		return err
	}
	defer listener.Close()

	mux := http.NewServeMux()
	mux.HandleFunc("/embedding", server.embeddings)
	mux.HandleFunc("/completion", server.completion)
	mux.HandleFunc("/health", server.health)

	httpServer := http.Server{
		Handler: mux,
	}

	log.Println("Server listening on", addr)
	if err := httpServer.Serve(listener); err != nil {
		log.Fatal("server error:", err)
		return err
	}

	cancel()
	return nil
}