package server

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"log/slog"
	"os"
	"testing"
	"time"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/app/lifecycle"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
	"github.com/ollama/ollama/llm"
	"github.com/ollama/ollama/server/envconfig"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func init() {
	os.Setenv("OLLAMA_DEBUG", "1")
	lifecycle.InitLogging()
}

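// TestInitScheduler verifies that a freshly initialized scheduler starts with a non-nil loaded map.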
func TestInitScheduler(t *testing.T) {
	ctx, done := context.WithCancel(context.Background())
	defer done()
	s := InitScheduler(ctx)
	s.loadedMu.Lock()
	require.NotNil(t, s.loaded)
	s.loadedMu.Unlock()
}

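// TestLoad exercises the load path: a failed server start reports an error, a successful start
// registers a runner with refCount 1, and a WaitUntilRunning failure drops the refCount back to 0
// and queues the runner for expiration.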
func TestLoad(t *testing.T) {
	ctx, done := context.WithTimeout(context.Background(), 20*time.Millisecond)
	defer done()
	s := InitScheduler(ctx)
	var ggml *llm.GGML // value not used in tests
	req := &LlmRequest{
		ctx:             ctx,
		model:           &Model{ModelPath: "foo"},
		opts:            api.DefaultOptions(),
		successCh:       make(chan *runnerRef, 1),
		errCh:           make(chan error, 1),
		sessionDuration: 2,
	}
	// Fail to load model first
	s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
		return nil, fmt.Errorf("something failed to load model blah")
	}
	gpus := gpu.GpuInfoList{}
	s.load(req, ggml, gpus)
	require.Len(t, req.successCh, 0)
	require.Len(t, req.errCh, 1)
	s.loadedMu.Lock()
	require.Len(t, s.loaded, 0)
	s.loadedMu.Unlock()
	err := <-req.errCh
	require.Contains(t, err.Error(), "this model may be incompatible")

	server := &mockLlm{estimatedVRAM: 10}
	s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
		return server, nil
	}
	s.load(req, ggml, gpus)
	select {
	case err := <-req.errCh:
		require.NoError(t, err)
	case resp := <-req.successCh:
		require.Equal(t, uint64(10), resp.estimatedVRAM)
		require.Equal(t, uint(1), resp.refCount)
		s.loadedMu.Lock()
		require.Len(t, s.loaded, 1)
		s.loadedMu.Unlock()
	}

	req.model.ModelPath = "dummy_model_path"
	server.waitResp = fmt.Errorf("wait failure")
	s.load(req, ggml, gpus)
	select {
	case err := <-req.errCh:
		require.Contains(t, err.Error(), "wait failure")
	case resp := <-req.successCh:
		t.Errorf("unexpected success %v", resp)
	}
	s.loadedMu.Lock()
	runner := s.loaded["dummy_model_path"]
	s.loadedMu.Unlock()
	require.NotNil(t, runner)
	require.Equal(t, uint(0), runner.refCount)
	time.Sleep(1 * time.Millisecond)
	require.Len(t, s.expiredCh, 1)
}

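// bundle groups the context, mock server, request, and model metadata for one scheduler test scenario.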
type bundle struct {
	ctx     context.Context //nolint:containedctx
	ctxDone func()
	srv     *mockLlm
	req     *LlmRequest
	ggml    *llm.GGML
}

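// newServer satisfies the scheduler's newServerFn hook and always returns the scenario's mock server.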
func (scenario *bundle) newServer(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
	return scenario.srv, nil
}

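// newScenario writes a minimal GGUF model to a temp file, loads its metadata, and returns a bundle
// whose request and mock server report the given VRAM estimate.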
func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedVRAM uint64) *bundle {
	scenario := &bundle{}
	scenario.ctx, scenario.ctxDone = context.WithCancel(ctx)
	t.Helper()

	f, err := os.CreateTemp(t.TempDir(), modelName)
	assert.Nil(t, err)
	defer f.Close()

	gguf := llm.NewGGUFV3(binary.LittleEndian)
	err = gguf.Encode(f, llm.KV{
		"general.architecture":          "llama",
		"general.name":                  "name",
		"llama.context_length":          uint32(32),
		"llama.embedding_length":        uint32(4096),
		"llama.block_count":             uint32(1),
		"llama.attention.head_count":    uint32(32),
		"llama.attention.head_count_kv": uint32(32),
		"tokenizer.ggml.tokens":         []string{" "},
		"tokenizer.ggml.scores":         []float32{0},
		"tokenizer.ggml.token_type":     []int32{0},
	}, []llm.Tensor{
		{Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}},
	})
	assert.Nil(t, err)

	fname := f.Name()
	model := &Model{Name: modelName, ModelPath: fname}
	scenario.ggml, err = llm.LoadModel(model.ModelPath)
	require.NoError(t, err)

	scenario.req = &LlmRequest{
		ctx:             scenario.ctx,
		model:           model,
		opts:            api.DefaultOptions(),
		sessionDuration: 5 * time.Millisecond,
		successCh:       make(chan *runnerRef, 1),
		errCh:           make(chan error, 1),
	}
	scenario.srv = &mockLlm{estimatedVRAM: estimatedVRAM}
	return scenario
}

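// TestRequests drives the scheduler end to end: reusing a loaded runner, reloading after an adapter
// change, honoring MaxRunners, permitting CPU-only loads, and evicting runners to make room for
// models that would not otherwise fit.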
func TestRequests(t *testing.T) {
	ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer done()

	// Same model, same request
	scenario1a := newScenario(t, ctx, "ollama-model-1", 10)
	scenario1a.req.sessionDuration = 0
	scenario1b := newScenario(t, ctx, "ollama-model-1", 11)
	scenario1b.req.model = scenario1a.req.model
	scenario1b.ggml = scenario1a.ggml
	scenario1b.req.sessionDuration = 0

	// simple reload of same model
	scenario2a := newScenario(t, ctx, "ollama-model-1", 20)
	scenario2a.req.model = scenario1a.req.model
	scenario2a.ggml = scenario1a.ggml

	// Multiple loaded models
	scenario3a := newScenario(t, ctx, "ollama-model-3a", 1*format.GigaByte)
	scenario3b := newScenario(t, ctx, "ollama-model-3b", 24*format.GigaByte)
	scenario3c := newScenario(t, ctx, "ollama-model-4a", 30)
	scenario3c.req.opts.NumGPU = 0                           // CPU load, will be allowed
	scenario3d := newScenario(t, ctx, "ollama-model-3c", 30) // Needs a prior model to be unloaded first

	s := InitScheduler(ctx)
	s.getGpuFn = func() gpu.GpuInfoList {
		g := gpu.GpuInfo{Library: "metal"}
		g.TotalMemory = 24 * format.GigaByte
		g.FreeMemory = 12 * format.GigaByte
		return []gpu.GpuInfo{g}
	}
	s.newServerFn = scenario1a.newServer
	slog.Info("scenario1a")
	s.pendingReqCh <- scenario1a.req
	require.Len(t, s.pendingReqCh, 1)
	s.Run(ctx)
	select {
	case resp := <-scenario1a.req.successCh:
		require.Equal(t, resp.llama, scenario1a.srv)
		require.Len(t, s.pendingReqCh, 0)
		require.Len(t, scenario1a.req.errCh, 0)
	case <-ctx.Done():
		t.Errorf("timeout")
	}

	// Same runner as first request due to not needing a reload
	s.newServerFn = scenario1b.newServer
	slog.Info("scenario1b")
	s.pendingReqCh <- scenario1b.req
	select {
	case resp := <-scenario1b.req.successCh:
		require.Equal(t, resp.llama, scenario1a.srv)
		require.Len(t, s.pendingReqCh, 0)
		require.Len(t, scenario1b.req.errCh, 0)
	case <-ctx.Done():
		t.Errorf("timeout")
	}

	// Trigger a reload
	s.newServerFn = scenario2a.newServer
	scenario2a.req.model.AdapterPaths = []string{"new"}
	slog.Info("scenario2a")
	s.pendingReqCh <- scenario2a.req
	// finish first two requests, so model can reload
	time.Sleep(1 * time.Millisecond)
	scenario1a.ctxDone()
	scenario1b.ctxDone()
	select {
	case resp := <-scenario2a.req.successCh:
		require.Equal(t, resp.llama, scenario2a.srv)
		require.Len(t, s.pendingReqCh, 0)
		require.Len(t, scenario2a.req.errCh, 0)
	case <-ctx.Done():
		t.Errorf("timeout")
	}

	envconfig.MaxRunners = 1
	s.newServerFn = scenario3a.newServer
	slog.Info("scenario3a")
	s.pendingReqCh <- scenario3a.req
	// finish prior request, so new model can load
	time.Sleep(1 * time.Millisecond)
	scenario2a.ctxDone()
	select {
	case resp := <-scenario3a.req.successCh:
		require.Equal(t, resp.llama, scenario3a.srv)
		require.Len(t, s.pendingReqCh, 0)
		require.Len(t, scenario3a.req.errCh, 0)
	case <-ctx.Done():
		t.Errorf("timeout")
	}
	s.loadedMu.Lock()
	require.Len(t, s.loaded, 1)
	s.loadedMu.Unlock()

	envconfig.MaxRunners = 0
	s.newServerFn = scenario3b.newServer
	slog.Info("scenario3b")
	s.pendingReqCh <- scenario3b.req
	select {
	case resp := <-scenario3b.req.successCh:
		require.Equal(t, resp.llama, scenario3b.srv)
		require.Len(t, s.pendingReqCh, 0)
		require.Len(t, scenario3b.req.errCh, 0)
	case <-ctx.Done():
		t.Errorf("timeout")
	}
	s.loadedMu.Lock()
	require.Len(t, s.loaded, 2)
	s.loadedMu.Unlock()

	// This is a CPU load with NumGPU = 0 so it should load
	s.newServerFn = scenario3c.newServer
	slog.Info("scenario3c")
	s.pendingReqCh <- scenario3c.req
	select {
	case resp := <-scenario3c.req.successCh:
		require.Equal(t, resp.llama, scenario3c.srv)
		require.Len(t, s.pendingReqCh, 0)
		require.Len(t, scenario3c.req.errCh, 0)
	case <-ctx.Done():
		t.Errorf("timeout")
	}
	s.loadedMu.Lock()
	require.Len(t, s.loaded, 3)
	s.loadedMu.Unlock()

	// Try to load a model that won't fit
	s.newServerFn = scenario3d.newServer
	slog.Info("scenario3d")
	s.loadedMu.Lock()
	require.Len(t, s.loaded, 3)
	s.loadedMu.Unlock()
	scenario3a.ctxDone() // Won't help since this one isn't big enough to make room
	time.Sleep(2 * time.Millisecond)
	s.pendingReqCh <- scenario3d.req
	// finish prior request, so new model can load
	time.Sleep(6 * time.Millisecond)
	s.loadedMu.Lock()
	require.Len(t, s.loaded, 2)
	s.loadedMu.Unlock()
	scenario3b.ctxDone()
	select {
	case resp := <-scenario3d.req.successCh:
		require.Equal(t, resp.llama, scenario3d.srv)
		require.Len(t, s.pendingReqCh, 0)
		require.Len(t, scenario3d.req.errCh, 0)
	case <-ctx.Done():
		t.Errorf("timeout")
	}
	s.loadedMu.Lock()
	require.Len(t, s.loaded, 2)
	s.loadedMu.Unlock()
}

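// TestGetRunner covers the public GetRunner path: queueing a request, rejecting overflow with a
// "server busy" error, and surfacing an error for a bad model path.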
func TestGetRunner(t *testing.T) {
	ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer done()

	// Same model, same request
	scenario1a := newScenario(t, ctx, "ollama-model-1a", 10)
	scenario1a.req.sessionDuration = 0
	scenario1b := newScenario(t, ctx, "ollama-model-1b", 10)
	scenario1b.req.sessionDuration = 0
	scenario1c := newScenario(t, ctx, "ollama-model-1c", 10)
	scenario1c.req.sessionDuration = 0
	envconfig.MaxQueuedRequests = 1
	s := InitScheduler(ctx)
	s.getGpuFn = func() gpu.GpuInfoList {
		g := gpu.GpuInfo{Library: "metal"}
		g.TotalMemory = 24 * format.GigaByte
		g.FreeMemory = 12 * format.GigaByte
		return []gpu.GpuInfo{g}
	}
	s.newServerFn = scenario1a.newServer
	slog.Info("scenario1a")
	successCh1a, errCh1a := s.GetRunner(scenario1a.ctx, scenario1a.req.model, scenario1a.req.opts, scenario1a.req.sessionDuration)
	require.Len(t, s.pendingReqCh, 1)
	slog.Info("scenario1b")
	successCh1b, errCh1b := s.GetRunner(scenario1b.ctx, scenario1b.req.model, scenario1b.req.opts, scenario1b.req.sessionDuration)
	require.Len(t, s.pendingReqCh, 1)
	require.Len(t, successCh1b, 0)
	require.Len(t, errCh1b, 1)
	err := <-errCh1b
	require.Contains(t, err.Error(), "server busy")
	s.Run(ctx)
	select {
	case resp := <-successCh1a:
		require.Equal(t, resp.llama, scenario1a.srv)
		require.Len(t, s.pendingReqCh, 0)
		require.Len(t, errCh1a, 0)
	case <-ctx.Done():
		t.Errorf("timeout")
	}
	scenario1a.ctxDone()
	s.loadedMu.Lock()
	require.Len(t, s.loaded, 1)
	s.loadedMu.Unlock()

	scenario1c.req.model.ModelPath = "bad path"
	slog.Info("scenario1c")
	successCh1c, errCh1c := s.GetRunner(scenario1c.ctx, scenario1c.req.model, scenario1c.req.opts, scenario1c.req.sessionDuration)
	// Starts in pending channel, then should be quickly processed to return an error
	time.Sleep(5 * time.Millisecond)
	require.Len(t, successCh1c, 0)
	s.loadedMu.Lock()
	require.Len(t, s.loaded, 0)
	s.loadedMu.Unlock()
	require.Len(t, errCh1c, 1)
	err = <-errCh1c
	require.Contains(t, err.Error(), "bad path")
	scenario1b.ctxDone()
}

// TODO - add one scenario that triggers the bogus finished event with positive ref count
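// TestPrematureExpired injects expiration and finished events out of order and verifies the
// scheduler handles them without leaking runners.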
func TestPrematureExpired(t *testing.T) {
	ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer done()

	// Same model, same request
	scenario1a := newScenario(t, ctx, "ollama-model-1a", 10)
	s := InitScheduler(ctx)
	s.getGpuFn = func() gpu.GpuInfoList {
		g := gpu.GpuInfo{Library: "metal"}
		g.TotalMemory = 24 * format.GigaByte
		g.FreeMemory = 12 * format.GigaByte
		return []gpu.GpuInfo{g}
	}
	s.newServerFn = scenario1a.newServer
	successCh1a, errCh1a := s.GetRunner(scenario1a.ctx, scenario1a.req.model, scenario1a.req.opts, scenario1a.req.sessionDuration)
	require.Len(t, s.pendingReqCh, 1)
	s.Run(ctx)
	select {
	case resp := <-successCh1a:
		require.Equal(t, resp.llama, scenario1a.srv)
		require.Len(t, s.pendingReqCh, 0)
		require.Len(t, errCh1a, 0)
		s.loadedMu.Lock()
		require.Len(t, s.loaded, 1)
		s.loadedMu.Unlock()
		slog.Info("sending premature expired event now")
		s.expiredCh <- resp // Shouldn't happen in real life, but make sure it's safe
	case <-ctx.Done():
		t.Errorf("timeout")
	}
	time.Sleep(scenario1a.req.sessionDuration)
	scenario1a.ctxDone()
	time.Sleep(20 * time.Millisecond)
	require.LessOrEqual(t, len(s.finishedReqCh), 1)
	time.Sleep(10 * time.Millisecond)
	require.Len(t, s.finishedReqCh, 0)
	s.loadedMu.Lock()
	require.Len(t, s.loaded, 0)
	s.loadedMu.Unlock()

	// also shouldn't happen in real life
	s.finishedReqCh <- scenario1a.req
	time.Sleep(5 * time.Millisecond)
}

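// TestUseLoadedRunner checks that reusing a runner bumps its refCount, adopts the request's
// session duration, and reports the request on the finished channel once its context is done.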
func TestUseLoadedRunner(t *testing.T) {
	ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
	req := &LlmRequest{
		ctx:             ctx,
		opts:            api.DefaultOptions(),
		successCh:       make(chan *runnerRef, 1),
		sessionDuration: 2,
	}
	finished := make(chan *LlmRequest)
	llm1 := &mockLlm{}
	r1 := &runnerRef{llama: llm1, sessionDuration: 1}
	req.useLoadedRunner(r1, finished)
	require.Equal(t, uint(1), r1.refCount)
	require.Equal(t, time.Duration(2), r1.sessionDuration)
	select {
	case success := <-req.successCh:
		require.Equal(t, r1, success)
	case <-ctx.Done():
		t.Errorf("timeout")
	}
	done()
	fin := <-finished
	require.Equal(t, req, fin)
}

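// TestUpdateFreeSpace verifies that the VRAM estimates of loaded runners are deducted from the
// GPUs' reported free memory.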
func TestUpdateFreeSpace(t *testing.T) {
	ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer done()
	gpus := gpu.GpuInfoList{
		{
			Library: "a",
			ID:      "1",
		},
		{
			Library: "a",
			ID:      "2",
		},
	}
	gpus[0].TotalMemory = 1000
	gpus[0].FreeMemory = 900
	gpus[1].TotalMemory = 2000
	gpus[1].FreeMemory = 1900
	llm1 := &mockLlm{estimatedVRAM: 100}
	llm2 := &mockLlm{estimatedVRAM: 200}
	r1 := &runnerRef{llama: llm1, gpus: gpus}
	r2 := &runnerRef{llama: llm2, gpus: gpus}

	s := InitScheduler(ctx)
	s.loadedMu.Lock()
	s.loaded["a"] = r1
	s.loaded["b"] = r2
	s.loadedMu.Unlock()

	s.updateFreeSpace(gpus)
	require.Equal(t, uint64(850), gpus[0].FreeMemory)
	require.Equal(t, uint64(1850), gpus[1].FreeMemory)
}

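// TestFindRunnerToUnload verifies that an idle runner (refCount 0) is chosen for unloading before
// a busy one.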
func TestFindRunnerToUnload(t *testing.T) {
	ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer done()

	r1 := &runnerRef{refCount: 1, sessionDuration: 1}
	r2 := &runnerRef{sessionDuration: 2}

	s := InitScheduler(ctx)
	s.loadedMu.Lock()
	s.loaded["a"] = r1
	s.loaded["b"] = r2
	s.loadedMu.Unlock()

	resp := s.findRunnerToUnload()
	require.Equal(t, r2, resp)
	r2.refCount = 1
	resp = s.findRunnerToUnload()
	require.Equal(t, r1, resp)
}

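// TestNeedsReload walks through the conditions that force a reload: mismatched adapters or
// projectors, changed options while loading, a failing ping, and a changed NumGPU setting.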
func TestNeedsReload(t *testing.T) {
	ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer done()

	llm := &mockLlm{}
	do := api.DefaultOptions()
	runner := &runnerRef{
		adapters:   []string{"adapter1"},
		projectors: []string{"projector1"},
		Options:    &do,
		llama:      llm,
	}
	req := &LlmRequest{
		model: &Model{
			AdapterPaths:   []string{"adapter2"},
			ProjectorPaths: []string{"projector2"},
		},
		opts: api.DefaultOptions(),
	}
	resp := runner.needsReload(ctx, req)
	require.True(t, resp)
	req.model.AdapterPaths = runner.adapters
	resp = runner.needsReload(ctx, req)
	require.True(t, resp)
	req.model.ProjectorPaths = runner.projectors
	runner.loading = true
	req.opts.NumBatch = 1234
	resp = runner.needsReload(ctx, req)
	require.True(t, resp)
	req.opts.NumBatch = runner.Options.NumBatch
	llm.pingResp = fmt.Errorf("foo")
	resp = runner.needsReload(ctx, req)
	require.True(t, resp)
	llm.pingResp = nil
	resp = runner.needsReload(ctx, req)
	require.False(t, resp)
	req.opts.NumGPU = 99
	resp = runner.needsReload(ctx, req)
	require.True(t, resp)
	req.opts.NumGPU = -1
	resp = runner.needsReload(ctx, req)
	require.False(t, resp)
}

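// TestUnloadAllRunners verifies that unloadAllRunners is safe with no runners loaded and closes
// every loaded server.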
func TestUnloadAllRunners(t *testing.T) {
	ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer done()

	llm1 := &mockLlm{}
	llm2 := &mockLlm{}
	s := InitScheduler(ctx)
	s.unloadAllRunners()

	r1 := &runnerRef{llama: llm1}
	r2 := &runnerRef{llama: llm2}

	s.loadedMu.Lock()
	s.loaded["a"] = r1
	s.loaded["b"] = r2
	s.loadedMu.Unlock()
	s.unloadAllRunners()

	require.True(t, llm1.closeCalled)
	require.True(t, llm2.closeCalled)
}

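// TestUnload verifies that unloading a runner closes its server and clears its adapters.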
func TestUnload(t *testing.T) {
	llm1 := &mockLlm{}
	r1 := &runnerRef{llama: llm1}
	r2 := &runnerRef{adapters: []string{"A"}}
	r1.unload()
	require.True(t, llm1.closeCalled)
	r2.unload()
	require.Nil(t, r2.adapters)
}

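// mockLlm is a configurable llm.LlamaServer stub used by the scheduler tests.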
type mockLlm struct {
	pingResp          error
	waitResp          error
	completionResp    error
	embeddingResp     []float64
	embeddingRespErr  error
	tokenizeResp      []int
	tokenizeRespErr   error
	detokenizeResp    string
	detokenizeRespErr error
	closeResp         error
	closeCalled       bool
	estimatedVRAM     uint64
}

func (s *mockLlm) Ping(ctx context.Context) error             { return s.pingResp }
func (s *mockLlm) WaitUntilRunning(ctx context.Context) error { return s.waitResp }
func (s *mockLlm) Completion(ctx context.Context, req llm.CompletionRequest, fn func(llm.CompletionResponse)) error {
	return s.completionResp
}
func (s *mockLlm) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	return s.embeddingResp, s.embeddingRespErr
}
func (s *mockLlm) Tokenize(ctx context.Context, content string) ([]int, error) {
	return s.tokenizeResp, s.tokenizeRespErr
}
func (s *mockLlm) Detokenize(ctx context.Context, tokens []int) (string, error) {
	return s.detokenizeResp, s.detokenizeRespErr
}
func (s *mockLlm) Close() error {
	s.closeCalled = true
	return s.closeResp
}
func (s *mockLlm) EstimatedVRAM() uint64 { return s.estimatedVRAM }