// gRPC Router Implementation

3
use std::sync::Arc;
4
5
6
7
8

use async_trait::async_trait;
use axum::{
    body::Body,
    extract::Request,
9
    http::{HeaderMap, StatusCode},
10
11
    response::{IntoResponse, Response},
};
12
use tracing::debug;
13

14
15
use super::{
    context::SharedComponents,
16
17
18
19
    harmony::{
        serve_harmony_responses, serve_harmony_responses_stream, HarmonyDetector,
        HarmonyResponsesContext,
    },
20
21
22
    pipeline::RequestPipeline,
    responses,
};
23
use crate::{
24
    app_context::AppContext,
25
26
27
    core::WorkerRegistry,
    protocols::{
        chat::ChatCompletionRequest,
28
        classify::ClassifyRequest,
29
30
31
32
33
34
35
36
        completion::CompletionRequest,
        embedding::EmbeddingRequest,
        generate::GenerateRequest,
        rerank::RerankRequest,
        responses::{ResponsesGetParams, ResponsesRequest},
    },
    routers::RouterTrait,
};
37

38
/// gRPC router implementation for SGLang
#[derive(Clone)]
#[allow(dead_code)] // NOTE(review): some fields may be unread in certain builds — confirm before removing
pub struct GrpcRouter {
    // Registry of workers; also queried by the Debug impl for aggregate stats.
    worker_registry: Arc<WorkerRegistry>,
    // Pipeline used for regular (non-Harmony) requests.
    pipeline: RequestPipeline,
    // Pipeline variant selected when HarmonyDetector flags the model as Harmony.
    harmony_pipeline: RequestPipeline,
    // Tokenizer and parser factories shared across pipeline executions.
    shared_components: Arc<SharedComponents>,
    // Responses context (bundles all /v1/responses dependencies: storage, MCP, background_tasks)
    responses_context: responses::ResponsesContext,
    // Harmony responses context (uses harmony pipeline)
    harmony_responses_context: responses::ResponsesContext,
}

impl GrpcRouter {
53
    /// Create a new gRPC router
54
    pub async fn new(ctx: &Arc<AppContext>) -> Result<Self, String> {
55
56
57
58
59
60
61
62
63
64
65
        // Extract necessary components from context
        let tokenizer = ctx
            .tokenizer
            .as_ref()
            .ok_or_else(|| "gRPC router requires tokenizer".to_string())?
            .clone();
        let reasoning_parser_factory = ctx
            .reasoning_parser_factory
            .as_ref()
            .ok_or_else(|| "gRPC router requires reasoning parser factory".to_string())?
            .clone();
66
67
68
69
70
        let tool_parser_factory = ctx
            .tool_parser_factory
            .as_ref()
            .ok_or_else(|| "gRPC router requires tool parser factory".to_string())?
            .clone();
71

72
        let worker_registry = ctx.worker_registry.clone();
73
        let _policy_registry = ctx.policy_registry.clone();
Chang Su's avatar
Chang Su committed
74

75
        // Create shared components for pipeline
76
        let shared_components = Arc::new(SharedComponents {
77
78
79
80
81
            tokenizer: tokenizer.clone(),
            tool_parser_factory: tool_parser_factory.clone(),
            reasoning_parser_factory: reasoning_parser_factory.clone(),
        });

82
        // Create regular pipeline
83
84
        let pipeline = RequestPipeline::new_regular(
            worker_registry.clone(),
85
            _policy_registry.clone(),
86
87
88
89
90
91
92
            tokenizer.clone(),
            tool_parser_factory.clone(),
            reasoning_parser_factory.clone(),
            ctx.configured_tool_parser.clone(),
            ctx.configured_reasoning_parser.clone(),
        );

93
94
        // Create Harmony pipelines
        let harmony_pipeline = RequestPipeline::new_harmony(
95
            worker_registry.clone(),
96
97
98
99
100
101
            _policy_registry.clone(),
            tokenizer.clone(),
            tool_parser_factory.clone(),
            reasoning_parser_factory.clone(),
            ctx.configured_tool_parser.clone(),
            ctx.configured_reasoning_parser.clone(),
102
103
        );

104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
        // Extract shared dependencies for responses contexts
        let mcp_manager = ctx
            .mcp_manager
            .get()
            .ok_or_else(|| "gRPC router requires MCP manager".to_string())?
            .clone();

        // Helper closure to create responses context with a given pipeline
        let create_responses_context = |pipeline: &RequestPipeline| {
            responses::ResponsesContext::new(
                Arc::new(pipeline.clone()),
                shared_components.clone(),
                worker_registry.clone(),
                ctx.response_storage.clone(),
                ctx.conversation_storage.clone(),
                ctx.conversation_item_storage.clone(),
                mcp_manager.clone(),
            )
        };

        // Create responses contexts for both pipelines
        let responses_context = create_responses_context(&pipeline);
        let harmony_responses_context = create_responses_context(&harmony_pipeline);

128
        Ok(GrpcRouter {
129
            worker_registry,
130
            pipeline,
131
            harmony_pipeline,
132
            shared_components,
133
            responses_context,
134
            harmony_responses_context,
135
136
        })
    }
137
138
139
140

    /// Main route_chat implementation
    async fn route_chat_impl(
        &self,
141
        headers: Option<&HeaderMap>,
142
143
144
        body: &ChatCompletionRequest,
        model_id: Option<&str>,
    ) -> Response {
145
146
147
        // Choose Harmony pipeline if model indicates Harmony
        let is_harmony = HarmonyDetector::is_harmony_model(&body.model);

148
        debug!(
149
150
            "Processing chat completion request for model: {:?}, using_harmony={}",
            model_id, is_harmony
151
152
        );

153
154
155
156
157
158
159
160
        let pipeline = if is_harmony {
            &self.harmony_pipeline
        } else {
            &self.pipeline
        };

        // Use selected pipeline for ALL requests (streaming and non-streaming)
        pipeline
161
            .execute_chat(
162
                Arc::new(body.clone()),
163
164
165
166
167
                headers.cloned(),
                model_id.map(|s| s.to_string()),
                self.shared_components.clone(),
            )
            .await
168
169
    }

170
171
172
    /// Main route_generate implementation
    async fn route_generate_impl(
        &self,
173
        headers: Option<&HeaderMap>,
174
175
176
177
178
        body: &GenerateRequest,
        model_id: Option<&str>,
    ) -> Response {
        debug!("Processing generate request for model: {:?}", model_id);

179
180
181
        // Use pipeline for ALL requests (streaming and non-streaming)
        self.pipeline
            .execute_generate(
182
                Arc::new(body.clone()),
183
184
185
                headers.cloned(),
                model_id.map(|s| s.to_string()),
                self.shared_components.clone(),
186
            )
187
            .await
188
    }
189
190
191
192
193
194
195
196
197

    /// Main route_responses implementation (pipeline-based for Harmony)
    async fn route_responses_impl(
        &self,
        _headers: Option<&HeaderMap>,
        body: &ResponsesRequest,
        model_id: Option<&str>,
    ) -> Response {
        debug!(
198
199
            "Processing Harmony responses request for model: {:?}, streaming: {:?}",
            model_id, body.stream
200
201
202
203
204
205
206
207
208
209
        );

        // Create HarmonyResponsesContext from existing responses context
        let harmony_ctx = HarmonyResponsesContext::new(
            Arc::new(self.harmony_pipeline.clone()),
            self.shared_components.clone(),
            self.harmony_responses_context.mcp_manager.clone(),
            self.harmony_responses_context.response_storage.clone(),
        );

210
211
212
213
214
215
216
217
218
        // Check if streaming is requested
        if body.stream.unwrap_or(false) {
            serve_harmony_responses_stream(&harmony_ctx, body.clone()).await
        } else {
            // Use non-streaming version for standard JSON responses
            match serve_harmony_responses(&harmony_ctx, body.clone()).await {
                Ok(response) => axum::Json(response).into_response(),
                Err(error_response) => error_response,
            }
219
220
        }
    }
221
222
223
224
}

impl std::fmt::Debug for GrpcRouter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
225
        let stats = self.worker_registry.stats();
226
        f.debug_struct("GrpcRouter")
227
            .field("workers_count", &stats.total_workers)
228
            .finish()
229
230
231
232
233
234
235
236
237
238
    }
}

#[async_trait]
impl RouterTrait for GrpcRouter {
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    async fn health_generate(&self, _req: Request<Body>) -> Response {
239
240
241
242
243
244
        // TODO: Implement actual generation test for gRPC
        (
            StatusCode::NOT_IMPLEMENTED,
            "Health generate not yet implemented for gRPC",
        )
            .into_response()
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
    }

    async fn get_server_info(&self, _req: Request<Body>) -> Response {
        (StatusCode::NOT_IMPLEMENTED).into_response()
    }

    async fn get_models(&self, _req: Request<Body>) -> Response {
        (StatusCode::NOT_IMPLEMENTED).into_response()
    }

    async fn get_model_info(&self, _req: Request<Body>) -> Response {
        (StatusCode::NOT_IMPLEMENTED).into_response()
    }

    async fn route_generate(
        &self,
261
262
263
        headers: Option<&HeaderMap>,
        body: &GenerateRequest,
        model_id: Option<&str>,
264
    ) -> Response {
265
        self.route_generate_impl(headers, body, model_id).await
266
267
268
269
    }

    async fn route_chat(
        &self,
270
        headers: Option<&HeaderMap>,
271
        body: &ChatCompletionRequest,
272
        model_id: Option<&str>,
273
    ) -> Response {
274
        self.route_chat_impl(headers, body, model_id).await
275
276
277
278
279
    }

    async fn route_completion(
        &self,
        _headers: Option<&HeaderMap>,
280
        _body: &CompletionRequest,
281
        _model_id: Option<&str>,
282
283
284
285
    ) -> Response {
        (StatusCode::NOT_IMPLEMENTED).into_response()
    }

286
287
    async fn route_responses(
        &self,
288
289
290
        headers: Option<&HeaderMap>,
        body: &ResponsesRequest,
        model_id: Option<&str>,
291
    ) -> Response {
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
        // Choose implementation based on Harmony model detection
        let is_harmony = HarmonyDetector::is_harmony_model(&body.model);

        debug!(
            "Processing responses request for model: {:?}, using_harmony={}",
            model_id, is_harmony
        );

        if is_harmony {
            // Use pipeline-based implementation for Harmony models
            self.route_responses_impl(headers, body, model_id).await
        } else {
            // Use legacy responses module for non-Harmony models
            responses::route_responses(
                &self.responses_context,
                Arc::new(body.clone()),
                headers.cloned(),
                model_id.map(|s| s.to_string()),
            )
            .await
        }
313
314
    }

315
316
317
    async fn get_response(
        &self,
        _headers: Option<&HeaderMap>,
318
        response_id: &str,
319
        _params: &ResponsesGetParams,
320
    ) -> Response {
321
        responses::get_response_impl(&self.responses_context, response_id).await
322
323
    }

324
    async fn cancel_response(&self, _headers: Option<&HeaderMap>, response_id: &str) -> Response {
325
        responses::cancel_response_impl(&self.responses_context, response_id).await
326
327
    }

328
    async fn route_embeddings(
329
330
        &self,
        _headers: Option<&HeaderMap>,
331
        _body: &EmbeddingRequest,
332
333
334
335
336
        _model_id: Option<&str>,
    ) -> Response {
        (StatusCode::NOT_IMPLEMENTED).into_response()
    }

337
    async fn route_classify(
338
339
        &self,
        _headers: Option<&HeaderMap>,
340
        _body: &ClassifyRequest,
341
342
        _model_id: Option<&str>,
    ) -> Response {
343
344
345
        (StatusCode::NOT_IMPLEMENTED).into_response()
    }

346
347
348
    async fn route_rerank(
        &self,
        _headers: Option<&HeaderMap>,
349
        _body: &RerankRequest,
350
        _model_id: Option<&str>,
351
    ) -> Response {
352
353
354
355
356
357
358
        (StatusCode::NOT_IMPLEMENTED).into_response()
    }

    fn router_type(&self) -> &'static str {
        "grpc"
    }
}