concurrency_test.go
//go:build integration

package integration
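// These tests exercise a live server; a typical invocation (adjust the package
// path to your checkout) is:
//
//	go test -tags integration -run 'TestConcurrentChat|TestMultiModelStress' ./integration/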

import (
	"context"
	"fmt"
	"log/slog"
	"math"
	"math/rand"
	"os"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
)

// Send multiple requests in parallel (concurrently) to a single model and ensure responses are expected
func TestConcurrentChat(t *testing.T) {
	// Assumes all requests have the same model
	req, resp := ChatRequests()
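	// Run one more concurrent client than the server's parallel slot count
	// (OLLAMA_NUM_PARALLEL) so at least one request has to queue.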
	numParallel := int(envconfig.NumParallel() + 1)
	iterLimit := 3

	softTimeout, hardTimeout := getTimeouts(t)
	ctx, cancel := context.WithTimeout(context.Background(), hardTimeout)
	defer cancel()
	client, _, cleanup := InitServerConnection(ctx, t)
	defer cleanup()

	// Get the server running (if applicable) and warm the model up with a single initial request
	slog.Info("loading", "model", req[0].Model)
	err := client.Generate(ctx,
		&api.GenerateRequest{Model: req[0].Model, KeepAlive: &api.Duration{Duration: 10 * time.Second}},
		func(response api.GenerateResponse) error { return nil },
	)
	if err != nil {
		t.Fatalf("failed to load model %s: %s", req[0].Model, err)
	}

	var wg sync.WaitGroup
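	// A fixed seed keeps the request mix deterministic across runs.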
	r := rand.New(rand.NewSource(0))
	started := time.Now()
	wg.Add(numParallel)
	for i := range numParallel {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < iterLimit; j++ {
				if time.Since(started) > softTimeout {
					slog.Info("exceeded soft timeout, winding down test")
					return
				}
				k := r.Int() % len(req)
				slog.Info("Starting", "thread", i, "iter", j)
				// On slower GPUs it can take a while to process the concurrent requests
				// so we allow a much longer initial timeout
				DoChat(ctx, t, client, req[k], resp[k], 120*time.Second, 20*time.Second)
			}
		}(i)
	}
	wg.Wait()
}

// Stress the scheduler and attempt to load more models than will fit to cause thrashing
// This test will always load at least 2 models even on CPU-based systems
func TestMultiModelStress(t *testing.T) {
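	// OLLAMA_MAX_VRAM is read as a raw byte count; when unset it defaults to 0,
	// which selects the small model set below.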
	s := os.Getenv("OLLAMA_MAX_VRAM")
	if s == "" {
		s = "0"
	}

	maxVram, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		t.Fatal(err)
	}

	// All models compatible with ollama-engine
	smallModels := []string{
		"llama3.2:1b",
		"qwen3:0.6b",
		"gemma2:2b",
		"deepseek-r1:1.5b", // qwen2 arch
		"gemma3:270m",
	}
	mediumModels := []string{
		"llama3.2:3b",    // ~3.4G
		"qwen3:8b",       // ~6.6G
		"gpt-oss:20b",    // ~15G
		"deepseek-r1:7b", // ~5.6G
		"gemma3:4b",      // ~5.8G
		"gemma2:9b",      // ~8.1G
	}

	var chosenModels []string
	switch {
	case maxVram < 10000*format.MebiByte:
		slog.Info("selecting small models")
		chosenModels = smallModels
	default:
		slog.Info("selecting medium models")
		chosenModels = mediumModels
	}

	softTimeout, hardTimeout := getTimeouts(t)
	ctx, cancel := context.WithTimeout(context.Background(), hardTimeout)
	defer cancel()
	client, _, cleanup := InitServerConnection(ctx, t)
	defer cleanup()
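	// These defaults are raised below if any chosen model ends up running on
	// CPU, since CPU inference is much slower.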
	initialTimeout := 120 * time.Second
	streamTimeout := 20 * time.Second

	// Make sure all the models are pulled before we get started
	for _, model := range chosenModels {
		if err := PullIfMissing(ctx, client, model); err != nil {
			t.Fatal(err)
		}
	}

	// Determine how many models we can load in parallel before we exceed VRAM
	// The intent is to go 1 over what can fit so we force the scheduler to thrash
	targetLoadCount := 0
	slog.Info("Loading models to find how many can fit in VRAM before overflowing")
chooseModels:
	for i, model := range chosenModels {
		req := &api.GenerateRequest{Model: model}
		slog.Info("loading", "model", model)
		err = client.Generate(ctx, req, func(response api.GenerateResponse) error { return nil })
		if err != nil {
			t.Fatalf("failed to load model %s: %s", model, err)
		}
		targetLoadCount++
		if i > 0 {
			models, err := client.ListRunning(ctx)
			if err != nil {
				t.Fatalf("failed to list running models: %s", err)
			}
			if len(models.Models) < targetLoadCount {
				loaded := []string{}
				for _, m := range models.Models {
					loaded = append(loaded, m.Name)
				}
				slog.Info("found model load capacity", "target", targetLoadCount, "current", loaded, "chosen", chosenModels[:targetLoadCount])
				break
			}
			// Effectively limit model count to 2 on CPU-only systems to avoid thrashing and timeouts
			for _, m := range models.Models {
				if m.SizeVRAM == 0 {
					slog.Info("model running on CPU", "name", m.Name, "target", targetLoadCount, "chosen", chosenModels[:targetLoadCount])
					initialTimeout = 240 * time.Second
					streamTimeout = 30 * time.Second
					break chooseModels
				}
			}
		}
	}
	if targetLoadCount == len(chosenModels) {
		// TODO consider retrying the medium models
		slog.Warn("all models loaded without exceeding VRAM; set OLLAMA_MAX_VRAM so the test can pick larger models")
	}

	r := rand.New(rand.NewSource(0))
	var wg sync.WaitGroup
	started := time.Now()
	for i := range targetLoadCount {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			reqs, resps := ChatRequests()
			for j := 0; j < 3; j++ {
				if time.Since(started) > softTimeout {
					slog.Info("exceeded soft timeout, winding down test")
					return
				}
				k := r.Int() % len(reqs)
				reqs[k].Model = chosenModels[i]
				slog.Info("Starting", "model", reqs[k].Model, "iteration", j, "request", reqs[k].Messages[0].Content)
				DoChat(ctx, t, client, reqs[k], resps[k], initialTimeout, streamTimeout)
			}
		}(i)
	}
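	// Periodically log which models are resident and how each is split across
	// CPU and GPU while the chat workers run.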
	go func() {
		for {
			time.Sleep(10 * time.Second)
			select {
			case <-ctx.Done():
				return
			default:
				models, err := client.ListRunning(ctx)
				if err != nil {
					slog.Warn("failed to list running models", "error", err)
					continue
				}
				for _, m := range models.Models {
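					// SizeVRAM is the portion of the model resident on GPU; the
					// remainder of Size lives in system memory.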
					var procStr string
					switch {
					case m.SizeVRAM == 0:
						procStr = "100% CPU"
					case m.SizeVRAM == m.Size:
						procStr = "100% GPU"
					case m.SizeVRAM > m.Size || m.Size == 0:
						procStr = "Unknown"
					default:
						sizeCPU := m.Size - m.SizeVRAM
						cpuPercent := math.Round(float64(sizeCPU) / float64(m.Size) * 100)
						procStr = fmt.Sprintf("%d%%/%d%%", int(cpuPercent), int(100-cpuPercent))
					}

					slog.Info("loaded model snapshot", "model", m.Name, "CPU/GPU", procStr, "expires", format.HumanTime(m.ExpiresAt, "Never"))
				}
			}
		}
	}()
	wg.Wait()
}