mod common;

use std::sync::Arc;

use common::mock_worker::{HealthStatus, MockWorker, MockWorkerConfig, WorkerType};
use reqwest::Client;
use serde_json::json;
use sglang_router_rs::{
    config::{RouterConfig, RoutingMode},
    routers::{RouterFactory, RouterTrait},
};

/// Test context that manages mock workers
struct TestContext {
    workers: Vec<MockWorker>,
    _router: Arc<dyn RouterTrait>,
    worker_urls: Vec<String>,
}

impl TestContext {
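    /// Starts the given mock workers, builds a regular-mode router over them,
    /// and returns a context holding both.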
    async fn new(worker_configs: Vec<MockWorkerConfig>) -> Self {
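        // Base router config; the worker URLs are filled in below once the
        // mock workers are running.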
        let mut config = RouterConfig::builder()
            .regular_mode(vec![])
            .port(3003)
            .worker_startup_timeout_secs(1)
            .worker_startup_check_interval_secs(1)
            .build_unchecked();

        let mut workers = Vec::new();
        let mut worker_urls = Vec::new();

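        // Start each mock worker and record the URL it is listening on.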
        for worker_config in worker_configs {
            let mut worker = MockWorker::new(worker_config);
            let url = worker.start().await.unwrap();
            worker_urls.push(url);
            workers.push(worker);
        }

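        // Give the workers a moment to finish starting up.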
        if !workers.is_empty() {
            tokio::time::sleep(tokio::time::Duration::from_millis(200)).await;
        }

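        // Point the router at the freshly started mock workers.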
        config.mode = RoutingMode::Regular {
            worker_urls: worker_urls.clone(),
        };

        let app_context = common::create_test_context(config.clone());

        let router = RouterFactory::create_router(&app_context).await.unwrap();
        let router = Arc::from(router);

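        // Brief pause so the router can finish its startup checks against the workers.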
        if !workers.is_empty() {
            tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
        }

        Self {
            workers,
            _router: router,
            worker_urls,
        }
    }

    /// Stops all mock workers, pausing briefly so in-flight work can settle.
    async fn shutdown(mut self) {
        // Small delay to ensure any pending operations complete
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

        for worker in &mut self.workers {
            worker.stop().await;
        }

        // Another small delay to ensure cleanup completes
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
    }

    /// POSTs a JSON body directly to the first mock worker and returns the
    /// parsed JSON response, or an error string on failure.
    async fn make_request(
        &self,
        endpoint: &str,
        body: serde_json::Value,
    ) -> Result<serde_json::Value, String> {
        let client = Client::new();

        // Use the first worker URL from the context
        let worker_url = self
            .worker_urls
            .first()
            .ok_or_else(|| "No workers available".to_string())?;

        let response = client
            .post(format!("{}{}", worker_url, endpoint))
            .json(&body)
            .send()
            .await
            .map_err(|e| format!("Request failed: {}", e))?;

        if !response.status().is_success() {
            return Err(format!("Request failed with status: {}", response.status()));
        }

        response
            .json::<serde_json::Value>()
            .await
            .map_err(|e| format!("Failed to parse response: {}", e))
    }
}

#[cfg(test)]
mod request_format_tests {
    use super::*;

    #[tokio::test]
    async fn test_generate_request_formats() {
        let ctx = TestContext::new(vec![MockWorkerConfig {
            port: 19001,
            worker_type: WorkerType::Regular,
            health_status: HealthStatus::Healthy,
            response_delay_ms: 0,
            fail_rate: 0.0,
        }])
        .await;

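        // Plain text prompt with no sampling parameters.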
        let payload = json!({
            "text": "Hello, world!",
            "stream": false
        });

        let result = ctx.make_request("/generate", payload).await;
        assert!(result.is_ok());

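        // Text prompt with explicit sampling parameters.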
        let payload = json!({
            "text": "Tell me a story",
            "sampling_params": {
                "temperature": 0.7,
                "max_new_tokens": 100,
                "top_p": 0.9
            },
            "stream": false
        });

        let result = ctx.make_request("/generate", payload).await;
        assert!(result.is_ok());

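        // Pre-tokenized input via input_ids instead of raw text.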
        let payload = json!({
            "input_ids": [1, 2, 3, 4, 5],
            "sampling_params": {
                "temperature": 0.0,
                "max_new_tokens": 50
            },
            "stream": false
        });

        let result = ctx.make_request("/generate", payload).await;
        assert!(result.is_ok());

        ctx.shutdown().await;
    }

    #[tokio::test]
    async fn test_v1_chat_completions_formats() {
        let ctx = TestContext::new(vec![MockWorkerConfig {
            port: 19002,
            worker_type: WorkerType::Regular,
            health_status: HealthStatus::Healthy,
            response_delay_ms: 0,
            fail_rate: 0.0,
        }])
        .await;

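        // Chat completion with a system message and a user message.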
        let payload = json!({
            "model": "test-model",
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hello!"}
            ],
            "stream": false
        });

        let result = ctx.make_request("/v1/chat/completions", payload).await;
        assert!(result.is_ok());

        let response = result.unwrap();
        assert!(response.get("choices").is_some());
        assert!(response.get("id").is_some());
        assert_eq!(
            response.get("object").and_then(|v| v.as_str()),
            Some("chat.completion")
        );

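        // Chat completion with explicit sampling parameters.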
        let payload = json!({
            "model": "test-model",
            "messages": [
                {"role": "user", "content": "Tell me a joke"}
            ],
            "temperature": 0.8,
            "max_tokens": 150,
            "top_p": 0.95,
            "stream": false
        });

        let result = ctx.make_request("/v1/chat/completions", payload).await;
        assert!(result.is_ok());

        ctx.shutdown().await;
    }

    #[tokio::test]
    async fn test_v1_completions_formats() {
        let ctx = TestContext::new(vec![MockWorkerConfig {
            port: 19003,
            worker_type: WorkerType::Regular,
            health_status: HealthStatus::Healthy,
            response_delay_ms: 0,
            fail_rate: 0.0,
        }])
        .await;

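        // Single string prompt with a max_tokens limit.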
        let payload = json!({
            "model": "test-model",
            "prompt": "Once upon a time",
            "max_tokens": 50,
            "stream": false
        });

        let result = ctx.make_request("/v1/completions", payload).await;
        assert!(result.is_ok());

        let response = result.unwrap();
        assert!(response.get("choices").is_some());
        assert_eq!(
            response.get("object").and_then(|v| v.as_str()),
            Some("text_completion")
        );

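        // Array of prompts in a single completions request.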
        let payload = json!({
            "model": "test-model",
            "prompt": ["First prompt", "Second prompt"],
            "temperature": 0.5,
            "stream": false
        });

        let result = ctx.make_request("/v1/completions", payload).await;
        assert!(result.is_ok());

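        // Completion request that also asks for token logprobs.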
        let payload = json!({
            "model": "test-model",
            "prompt": "The capital of France is",
            "max_tokens": 10,
            "logprobs": 5,
            "stream": false
        });

        let result = ctx.make_request("/v1/completions", payload).await;
        assert!(result.is_ok());

        ctx.shutdown().await;
    }

    #[tokio::test]
    async fn test_batch_requests() {
        let ctx = TestContext::new(vec![MockWorkerConfig {
            port: 19004,
            worker_type: WorkerType::Regular,
            health_status: HealthStatus::Healthy,
            response_delay_ms: 0,
            fail_rate: 0.0,
        }])
        .await;

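        // Batch of text prompts in a single generate request.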
        let payload = json!({
            "text": ["First text", "Second text", "Third text"],
            "sampling_params": {
                "temperature": 0.7,
                "max_new_tokens": 50
            },
            "stream": false
        });

        let result = ctx.make_request("/generate", payload).await;
        assert!(result.is_ok());

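        // Batch of pre-tokenized inputs.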
        let payload = json!({
            "input_ids": [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
            "stream": false
        });

        let result = ctx.make_request("/generate", payload).await;
        assert!(result.is_ok());

        ctx.shutdown().await;
    }

    #[tokio::test]
    async fn test_special_parameters() {
        let ctx = TestContext::new(vec![MockWorkerConfig {
            port: 19005,
            worker_type: WorkerType::Regular,
            health_status: HealthStatus::Healthy,
            response_delay_ms: 0,
            fail_rate: 0.0,
        }])
        .await;

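        // Request logprobs alongside the generated text.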
        let payload = json!({
            "text": "Test",
            "return_logprob": true,
            "stream": false
        });

        let result = ctx.make_request("/generate", payload).await;
        assert!(result.is_ok());

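        // Constrained generation via a json_schema sampling parameter.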
        let payload = json!({
            "text": "Generate JSON",
            "sampling_params": {
                "temperature": 0.0,
                "json_schema": "$$ANY$$"
            },
            "stream": false
        });

        let result = ctx.make_request("/generate", payload).await;
        assert!(result.is_ok());

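        // ignore_eos keeps generating up to max_new_tokens even after an EOS token.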
        let payload = json!({
            "text": "Continue forever",
            "sampling_params": {
                "temperature": 0.7,
                "max_new_tokens": 100,
                "ignore_eos": true
            },
            "stream": false
        });

        let result = ctx.make_request("/generate", payload).await;
        assert!(result.is_ok());

        ctx.shutdown().await;
    }

    #[tokio::test]
    async fn test_error_handling() {
        let ctx = TestContext::new(vec![MockWorkerConfig {
            port: 19006,
            worker_type: WorkerType::Regular,
            health_status: HealthStatus::Healthy,
            response_delay_ms: 0,
            fail_rate: 0.0,
        }])
        .await;

        let payload = json!({});

        let result = ctx.make_request("/generate", payload).await;
        // Mock worker accepts empty body
        assert!(result.is_ok());

        ctx.shutdown().await;
    }
}