//! Handler functions for /v1/responses endpoints
//!
//! # Public API
//!
//! - `route_responses()` - POST /v1/responses (main entry point)
//! - `get_response_impl()` - GET /v1/responses/{response_id}
//! - `cancel_response_impl()` - POST /v1/responses/{response_id}/cancel
//!
//! # Architecture
//!
//! This module orchestrates all request handling for the /v1/responses endpoint.
//! It supports two execution modes:
//!
//! 1. **Synchronous** - Returns complete response immediately
//! 2. **Streaming** - Returns SSE stream with real-time events
//!
//! Note: Background mode is no longer supported. Requests with background=true
//! will be rejected with a 400 error.
//!
//! # Request Flow
//!
//! ```text
//! route_responses()
//!   ├─► route_responses_sync()       → route_responses_internal()
//!   └─► route_responses_streaming()  → convert_chat_stream_to_responses_stream()
//!
//! route_responses_internal()
//!   ├─► load_conversation_history()
//!   ├─► execute_tool_loop() (if MCP tools)
//!   │   └─► pipeline.execute_chat_for_responses() [loop]
//!   └─► execute_without_mcp() (if no MCP tools)
//!       └─► pipeline.execute_chat_for_responses()
//! ```
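//!
//! # Streaming Output (illustrative)
//!
//! A rough sketch of the SSE sequence a streaming request produces; the exact
//! event names and payload shapes are defined by `ResponseStreamEventEmitter`
//! and are only illustrative here:
//!
//! ```text
//! data: {"type":"response.created", ...}
//! data: {"type":"response.output_text.delta","delta":"Hel", ...}
//! data: {"type":"response.output_text.delta","delta":"lo", ...}
//! data: {"type":"response.completed","response":{..., "usage":{...}}}
//! data: [DONE]
//! ```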

use std::sync::Arc;

use axum::{
    body::Body,
    http::{self, header, StatusCode},
    response::{IntoResponse, Response},
};
use bytes::Bytes;
use futures_util::StreamExt;
use serde_json::json;
use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::{debug, warn};
use uuid::Uuid;
use validator::Validate;

use super::{
    conversions,
    tool_loop::{execute_tool_loop, execute_tool_loop_streaming},
};
use crate::{
    data_connector::{
        self, ConversationId, ConversationItemStorage, ConversationStorage, ResponseId,
        ResponseStorage,
    },
    protocols::{
        chat::{self, ChatCompletionStreamResponse},
        common,
        responses::{
            self, ResponseContentPart, ResponseInput, ResponseInputOutputItem, ResponseOutputItem,
            ResponseReasoningContent, ResponseStatus, ResponsesRequest, ResponsesResponse,
            ResponsesUsage,
        },
    },
    routers::{
        grpc::{common::responses::streaming::ResponseStreamEventEmitter, error},
        openai::{conversations::persist_conversation_items, mcp::ensure_request_mcp_client},
    },
};

/// Main handler for POST /v1/responses
///
/// Validates the request, determines the execution mode (sync/streaming), and delegates
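///
/// # Errors (summary of the checks below)
///
/// - 503 `no_available_workers` if no worker serves the requested model
/// - 400 `invalid_value` if request validation fails
/// - 400 `mutually_exclusive_parameters` if both `previous_response_id` and
///   `conversation` are set
/// - 400 `unsupported_parameter` if `background` is true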
pub async fn route_responses(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
) -> Response {
    // 0. Fast worker validation (fail-fast before expensive operations)
    let requested_model: Option<&str> = model_id.as_deref().or(Some(request.model.as_str()));

    if let Some(model) = requested_model {
        // Check if any workers support this model
        let available_models = ctx.worker_registry.get_models();

        if !available_models.contains(&model.to_string()) {
            return (
                StatusCode::SERVICE_UNAVAILABLE,
                axum::Json(json!({
                    "error": {
                        "message": format!(
                            "No workers available for model '{}'. Available models: {}",
                            model,
                            available_models.join(", ")
                        ),
                        "type": "service_unavailable",
                        "param": "model",
                        "code": "no_available_workers"
                    }
                })),
            )
                .into_response();
        }
    }

    // 1. Validate request (includes conversation ID format)
    if let Err(validation_errors) = request.validate() {
        // Extract the first error message for conversation field
        let error_message = validation_errors
            .field_errors()
            .get("conversation")
            .and_then(|errors| errors.first())
            .and_then(|error| error.message.as_ref())
            .map(|msg| msg.to_string())
            .unwrap_or_else(|| "Invalid request parameters".to_string());

        return (
            StatusCode::BAD_REQUEST,
            axum::Json(json!({
                "error": {
                    "message": error_message,
                    "type": "invalid_request_error",
                    "param": "conversation",
                    "code": "invalid_value"
                }
            })),
        )
            .into_response();
    }

    // 2. Validate mutually exclusive parameters
    if request.previous_response_id.is_some() && request.conversation.is_some() {
        return (
            StatusCode::BAD_REQUEST,
            axum::Json(json!({
                "error": {
                    "message": "Mutually exclusive parameters. Ensure you are only providing one of: 'previous_response_id' or 'conversation'.",
                    "type": "invalid_request_error",
                    "param": serde_json::Value::Null,
                    "code": "mutually_exclusive_parameters"
                }
            })),
        )
            .into_response();
    }

    // 3. Reject background mode (no longer supported)
    let is_background = request.background.unwrap_or(false);
    if is_background {
        return (
            StatusCode::BAD_REQUEST,
            axum::Json(json!({
                "error": {
                    "message": "Background mode is not supported. Please set 'background' to false or omit it.",
                    "type": "invalid_request_error",
                    "param": "background",
                    "code": "unsupported_parameter"
                }
            })),
        )
            .into_response();
    }

    // 4. Route based on execution mode
    let is_streaming = request.stream.unwrap_or(false);
    if is_streaming {
        route_responses_streaming(ctx, request, headers, model_id).await
    } else {
        route_responses_sync(ctx, request, headers, model_id, None).await
    }
}

// ============================================================================
// Synchronous Execution
// ============================================================================

/// Execute synchronous responses request
///
/// This is the core execution path that:
/// 1. Loads conversation history / response chain
/// 2. Converts to ChatCompletionRequest
/// 3. Executes chat pipeline
/// 4. Converts back to ResponsesResponse
/// 5. Persists to storage
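///
/// Roughly, for a plain-text request (shapes sketched here, not exact):
///
/// ```text
/// ResponsesRequest { input: Text("hi"), .. }
///   → ChatCompletionRequest { messages: [{role: "user", content: "hi"}], .. }
///   → ChatCompletionResponse → ResponsesResponse { output: [Message { .. }], .. }
/// ```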
async fn route_responses_sync(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    response_id: Option<String>,
) -> Response {
    match route_responses_internal(ctx, request, headers, model_id, response_id).await {
        Ok(responses_response) => axum::Json(responses_response).into_response(),
        Err(response) => response, // Already a Response with proper status code
    }
}

/// Internal implementation returning `Result` so callers can propagate error responses unchanged
async fn route_responses_internal(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    response_id: Option<String>,
) -> Result<ResponsesResponse, Response> {
    // 1. Load conversation history and build modified request
    let modified_request = load_conversation_history(ctx, &request).await?;

    // 2. Check if request has MCP tools - if so, use tool loop
    let responses_response = if let Some(tools) = &request.tools {
        // Ensure dynamic MCP client is registered for request-scoped tools
        if ensure_request_mcp_client(&ctx.mcp_manager, tools)
            .await
            .is_some()
        {
            debug!("MCP tools detected, using tool loop");

            // Execute with MCP tool loop
            execute_tool_loop(
                ctx,
                modified_request,
                &request,
                headers,
                model_id,
                response_id.clone(),
            )
            .await?
        } else {
            debug!("Failed to create MCP client from request tools");
            // Fall through to non-MCP execution
            execute_without_mcp(
                ctx,
                &modified_request,
                &request,
                headers,
                model_id,
                response_id.clone(),
            )
            .await?
        }
    } else {
        // No tools, execute normally
        execute_without_mcp(
            ctx,
            &modified_request,
            &request,
            headers,
            model_id,
            response_id.clone(),
        )
        .await?
    };

    // 3. Persist response to storage if store=true
    if request.store.unwrap_or(true) {
        if let Ok(response_json) = serde_json::to_value(&responses_response) {
            if let Err(e) = persist_conversation_items(
                ctx.conversation_storage.clone(),
                ctx.conversation_item_storage.clone(),
                ctx.response_storage.clone(),
                &response_json,
                &request,
            )
            .await
            {
                warn!("Failed to persist response: {}", e);
            }
        }
    }

    Ok(responses_response)
}

/// Execute streaming responses request
async fn route_responses_streaming(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
) -> Response {
    // 1. Load conversation history
    let modified_request = match load_conversation_history(ctx, &request).await {
        Ok(req) => req,
        Err(response) => return response, // Already a Response with proper status code
    };

    // 2. Check if request has MCP tools - if so, use streaming tool loop
    if let Some(tools) = &request.tools {
        // Ensure dynamic MCP client is registered for request-scoped tools
        if ensure_request_mcp_client(&ctx.mcp_manager, tools)
            .await
            .is_some()
        {
            debug!("MCP tools detected in streaming mode, using streaming tool loop");

            return execute_tool_loop_streaming(ctx, modified_request, &request, headers, model_id)
                .await;
        }
    }

    // 3. Convert ResponsesRequest → ChatCompletionRequest
    let chat_request = match conversions::responses_to_chat(&modified_request) {
        Ok(req) => Arc::new(req),
        Err(e) => {
            return (
                StatusCode::BAD_REQUEST,
                axum::Json(json!({
                    "error": {
                        "message": format!("Failed to convert request: {}", e),
                        "type": "invalid_request_error"
                    }
                })),
            )
                .into_response();
        }
    };

    // 4. Execute chat pipeline and convert streaming format (no MCP tools)
    convert_chat_stream_to_responses_stream(ctx, chat_request, headers, model_id, &request).await
}

/// Convert chat streaming response to responses streaming format
///
/// This function:
/// 1. Gets chat SSE stream from pipeline
/// 2. Intercepts and parses each SSE event
/// 3. Converts ChatCompletionStreamResponse → ResponsesResponse delta
/// 4. Accumulates response state for final persistence
/// 5. Emits transformed SSE events in responses format
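///
/// One delta translation, sketched (field shapes illustrative, not exact):
///
/// ```text
/// in:  data: {"object":"chat.completion.chunk","choices":[{"delta":{"content":"Hi"}}]}
/// out: data: {"type":"response.output_text.delta","delta":"Hi", ...}
/// ```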
async fn convert_chat_stream_to_responses_stream(
    ctx: &super::context::ResponsesContext,
    chat_request: Arc<chat::ChatCompletionRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    original_request: &ResponsesRequest,
) -> Response {
    debug!("Converting chat SSE stream to responses SSE format");

    // Get chat streaming response
    let chat_response = ctx
        .pipeline
        .execute_chat(
            chat_request.clone(),
            headers,
            model_id,
            ctx.components.clone(),
        )
        .await;

    // Extract body and headers from chat response
    let (parts, body) = chat_response.into_parts();

    // Create channel for transformed SSE events
    let (tx, rx) = mpsc::unbounded_channel::<Result<Bytes, std::io::Error>>();

    // Spawn background task to transform stream
    let original_request_clone = original_request.clone();
    let chat_request_clone = chat_request.clone();
    let response_storage = ctx.response_storage.clone();
    let conversation_storage = ctx.conversation_storage.clone();
    let conversation_item_storage = ctx.conversation_item_storage.clone();

    tokio::spawn(async move {
        if let Err(e) = process_and_transform_sse_stream(
            body,
            original_request_clone,
            chat_request_clone,
            response_storage,
            conversation_storage,
            conversation_item_storage,
            tx.clone(),
        )
        .await
        {
            warn!("Error transforming SSE stream: {}", e);
            let error_event = json!({
                "error": {
                    "message": e,
                    "type": "stream_error"
                }
            });
            let _ = tx.send(Ok(Bytes::from(format!("data: {}\n\n", error_event))));
        }

        // Send final [DONE] event
        let _ = tx.send(Ok(Bytes::from("data: [DONE]\n\n")));
    });

    // Build SSE response with transformed stream
    let stream = UnboundedReceiverStream::new(rx);
    let body = Body::from_stream(stream);

    let mut response = Response::builder().status(parts.status).body(body).unwrap();

    // Copy headers from original chat response
    *response.headers_mut() = parts.headers;

    // Ensure SSE headers are set
    response.headers_mut().insert(
        header::CONTENT_TYPE,
        header::HeaderValue::from_static("text/event-stream"),
    );
    response.headers_mut().insert(
        header::CACHE_CONTROL,
        header::HeaderValue::from_static("no-cache"),
    );
    response.headers_mut().insert(
        header::CONNECTION,
        header::HeaderValue::from_static("keep-alive"),
    );

    response
}

/// Process chat SSE stream and transform to responses format
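///
/// Expects the upstream body to arrive as SSE frames of the form
/// `data: {json}\n\n`, terminated by a `data: [DONE]` frame (see the
/// parsing loop below).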
async fn process_and_transform_sse_stream(
    body: Body,
    original_request: ResponsesRequest,
    _chat_request: Arc<chat::ChatCompletionRequest>,
    response_storage: Arc<dyn ResponseStorage>,
    conversation_storage: Arc<dyn ConversationStorage>,
    conversation_item_storage: Arc<dyn ConversationItemStorage>,
    tx: mpsc::UnboundedSender<Result<Bytes, std::io::Error>>,
) -> Result<(), String> {
    // Create accumulator for final response
    let mut accumulator = StreamingResponseAccumulator::new(&original_request);

    // Create event emitter for OpenAI-compatible streaming
    let response_id = format!("resp_{}", Uuid::new_v4());
    let model = original_request.model.clone();
    let created_at = chrono::Utc::now().timestamp() as u64;
    let mut event_emitter = ResponseStreamEventEmitter::new(response_id, model, created_at);

    // Convert body to data stream
    let mut stream = body.into_data_stream();

    // Process stream chunks (each chunk is a complete SSE event)
    while let Some(chunk_result) = stream.next().await {
        let chunk = chunk_result.map_err(|e| format!("Stream read error: {}", e))?;

        // Convert chunk to string
        let event_str = String::from_utf8_lossy(&chunk);
        let event = event_str.trim();

        // Check for end of stream
        if event == "data: [DONE]" {
            break;
        }

        // Parse SSE event (format: "data: {...}\n\n" or "data: {...}")
        if let Some(json_str) = event.strip_prefix("data: ") {
            let json_str = json_str.trim();

            // Try to parse as ChatCompletionStreamResponse
            match serde_json::from_str::<ChatCompletionStreamResponse>(json_str) {
                Ok(chat_chunk) => {
                    // Update accumulator
                    accumulator.process_chunk(&chat_chunk);

                    // Process chunk through event emitter (emits proper OpenAI events)
                    event_emitter.process_chunk(&chat_chunk, &tx)?;
                }
                Err(_) => {
                    // Not a valid chat chunk - might be error event, pass through
                    debug!("Non-chunk SSE event, passing through: {}", event);
                    if tx.send(Ok(Bytes::from(format!("{}\n\n", event)))).is_err() {
                        return Err("Client disconnected".to_string());
                    }
                }
            }
        }
    }

    // Emit final response.completed event with accumulated usage
    let usage_json = accumulator.usage.as_ref().map(|u| {
        let mut usage_obj = json!({
            "prompt_tokens": u.prompt_tokens,
            "completion_tokens": u.completion_tokens,
            "total_tokens": u.total_tokens
        });

        // Include reasoning_tokens if present
        if let Some(details) = &u.completion_tokens_details {
            if let Some(reasoning_tokens) = details.reasoning_tokens {
                usage_obj["completion_tokens_details"] = json!({
                    "reasoning_tokens": reasoning_tokens
                });
            }
        }

        usage_obj
    });

    let completed_event = event_emitter.emit_completed(usage_json.as_ref());
    event_emitter.send_event(&completed_event, &tx)?;

    // Finalize and persist accumulated response
    if original_request.store.unwrap_or(true) {
        let final_response = accumulator.finalize();

        if let Ok(response_json) = serde_json::to_value(&final_response) {
            if let Err(e) = persist_conversation_items(
                conversation_storage.clone(),
                conversation_item_storage.clone(),
                response_storage.clone(),
                &response_json,
                &original_request,
            )
            .await
            {
                warn!("Failed to persist streaming response: {}", e);
            } else {
                debug!("Persisted streaming response: {}", final_response.id);
            }
        }
    }

    Ok(())
}

/// Response accumulator for streaming responses
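///
/// Intended use within this module (minimal sketch; `chunks` stands in for
/// the `ChatCompletionStreamResponse` values parsed off the SSE stream):
///
/// ```ignore
/// let mut acc = StreamingResponseAccumulator::new(&original_request);
/// for chunk in &chunks {
///     acc.process_chunk(chunk); // buffers content/reasoning/tool-call deltas
/// }
/// let final_response = acc.finalize(); // consumes the accumulator
/// ```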
struct StreamingResponseAccumulator {
    // Response metadata
    response_id: String,
    model: String,
    created_at: i64,

    // Accumulated content
    content_buffer: String,
    reasoning_buffer: String,
    tool_calls: Vec<ResponseOutputItem>,

    // Completion state
    finish_reason: Option<String>,
    usage: Option<common::Usage>,

    // Original request for final response construction
    original_request: ResponsesRequest,
}

impl StreamingResponseAccumulator {
    fn new(original_request: &ResponsesRequest) -> Self {
        Self {
            response_id: String::new(),
            model: String::new(),
            created_at: 0,
            content_buffer: String::new(),
            reasoning_buffer: String::new(),
            tool_calls: Vec::new(),
            finish_reason: None,
            usage: None,
            original_request: original_request.clone(),
        }
    }

    fn process_chunk(&mut self, chunk: &ChatCompletionStreamResponse) {
        // Initialize metadata on first chunk
        if self.response_id.is_empty() {
            self.response_id = chunk.id.clone();
            self.model = chunk.model.clone();
            self.created_at = chunk.created as i64;
        }

        // Process first choice (responses API doesn't support n>1)
        if let Some(choice) = chunk.choices.first() {
            // Accumulate content
            if let Some(content) = &choice.delta.content {
                self.content_buffer.push_str(content);
            }

            // Accumulate reasoning
            if let Some(reasoning) = &choice.delta.reasoning_content {
                self.reasoning_buffer.push_str(reasoning);
            }

            // Process tool call deltas
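            // Deltas that belong to the same logical call share an `index`;
            // id/name/arguments arrive as fragments and are concatenated below.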
            if let Some(tool_call_deltas) = &choice.delta.tool_calls {
                for delta in tool_call_deltas {
                    // Use index directly (it's a u32, not Option<u32>)
                    let index = delta.index as usize;

                    // Ensure we have enough tool calls
                    while self.tool_calls.len() <= index {
                        self.tool_calls.push(ResponseOutputItem::FunctionToolCall {
                            id: String::new(),
                            call_id: String::new(),
                            name: String::new(),
                            arguments: String::new(),
                            output: None,
                            status: "in_progress".to_string(),
                        });
                    }

                    // Update the tool call at this index
                    if let ResponseOutputItem::FunctionToolCall {
                        id,
                        name,
                        arguments,
                        ..
                    } = &mut self.tool_calls[index]
                    {
                        if let Some(delta_id) = &delta.id {
                            id.push_str(delta_id);
                        }
                        if let Some(function) = &delta.function {
                            if let Some(delta_name) = &function.name {
                                name.push_str(delta_name);
                            }
                            if let Some(delta_args) = &function.arguments {
                                arguments.push_str(delta_args);
                            }
                        }
                    }
                }
            }

            // Update finish reason
            if let Some(reason) = &choice.finish_reason {
                self.finish_reason = Some(reason.clone());
            }
        }

        // Update usage
        if let Some(usage) = &chunk.usage {
            self.usage = Some(usage.clone());
        }
    }

    fn finalize(self) -> ResponsesResponse {
        let mut output: Vec<ResponseOutputItem> = Vec::new();

        // Add message content if present
        if !self.content_buffer.is_empty() {
            output.push(ResponseOutputItem::Message {
                id: format!("msg_{}", self.response_id),
                role: "assistant".to_string(),
                content: vec![ResponseContentPart::OutputText {
                    text: self.content_buffer,
                    annotations: vec![],
                    logprobs: None,
                }],
                status: "completed".to_string(),
            });
        }

        // Add reasoning if present
        if !self.reasoning_buffer.is_empty() {
            output.push(ResponseOutputItem::Reasoning {
                id: format!("reasoning_{}", self.response_id),
                summary: vec![],
                content: vec![ResponseReasoningContent::ReasoningText {
                    text: self.reasoning_buffer,
                }],
                status: Some("completed".to_string()),
            });
        }

        // Add tool calls
        output.extend(self.tool_calls);

        // Determine final status
        let status = match self.finish_reason.as_deref() {
            Some("stop") | Some("length") => ResponseStatus::Completed,
            Some("tool_calls") => ResponseStatus::InProgress,
            Some("failed") | Some("error") => ResponseStatus::Failed,
            _ => ResponseStatus::Completed,
        };

        // Convert usage
        let usage = self.usage.as_ref().map(|u| {
            let usage_info = common::UsageInfo {
                prompt_tokens: u.prompt_tokens,
                completion_tokens: u.completion_tokens,
                total_tokens: u.total_tokens,
                reasoning_tokens: u
                    .completion_tokens_details
                    .as_ref()
                    .and_then(|d| d.reasoning_tokens),
                prompt_tokens_details: None,
            };
            ResponsesUsage::Classic(usage_info)
        });

        ResponsesResponse {
            id: self.response_id,
            object: "response".to_string(),
            created_at: self.created_at,
            status,
            error: None,
            incomplete_details: None,
            instructions: self.original_request.instructions.clone(),
            max_output_tokens: self.original_request.max_output_tokens,
            model: self.model,
            output,
            parallel_tool_calls: self.original_request.parallel_tool_calls.unwrap_or(true),
            previous_response_id: self.original_request.previous_response_id.clone(),
            reasoning: None,
            store: self.original_request.store.unwrap_or(true),
            temperature: self.original_request.temperature,
            text: None,
            tool_choice: "auto".to_string(),
            tools: self.original_request.tools.clone().unwrap_or_default(),
            top_p: self.original_request.top_p,
            truncation: None,
            usage,
            user: None,
            safety_identifier: self.original_request.user.clone(),
            metadata: self.original_request.metadata.clone().unwrap_or_default(),
        }
    }
}

// ============================================================================
// Helper Functions
// ============================================================================

/// Execute request without MCP tool loop (simple pipeline execution)
async fn execute_without_mcp(
    ctx: &super::context::ResponsesContext,
    modified_request: &ResponsesRequest,
    original_request: &ResponsesRequest,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    response_id: Option<String>,
) -> Result<ResponsesResponse, Response> {
    // Convert ResponsesRequest → ChatCompletionRequest
    let chat_request = conversions::responses_to_chat(modified_request)
        .map_err(|e| error::bad_request(format!("Failed to convert request: {}", e)))?;

    // Execute chat pipeline (errors already have proper HTTP status codes)
    let chat_response = ctx
        .pipeline
        .execute_chat_for_responses(
            Arc::new(chat_request),
            headers,
            model_id,
            ctx.components.clone(),
        )
        .await?; // Preserve the Response error as-is

    // Convert ChatCompletionResponse → ResponsesResponse
    conversions::chat_to_responses(&chat_response, original_request, response_id)
        .map_err(|e| error::internal_error(format!("Failed to convert to responses format: {}", e)))
}

/// Load conversation history and response chains, returning modified request
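///
/// Illustrative effect on a request carrying a conversation id (shapes
/// sketched, not exact):
///
/// ```text
/// in:  { "conversation": "conv_1", "input": "hi" }
/// out: { "input": [ ...stored conv_1 items..., { role: "user", content: "hi" } ] }
/// ```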
async fn load_conversation_history(
    ctx: &super::context::ResponsesContext,
    request: &ResponsesRequest,
) -> Result<ResponsesRequest, Response> {
    let mut modified_request = request.clone();
    let mut conversation_items: Option<Vec<ResponseInputOutputItem>> = None;

    // Handle previous_response_id by loading response chain
    if let Some(ref prev_id_str) = modified_request.previous_response_id {
        let prev_id = ResponseId::from(prev_id_str.as_str());
        match ctx
            .response_storage
            .get_response_chain(&prev_id, None)
            .await
        {
            Ok(chain) => {
                let mut items = Vec::new();
                for stored in chain.responses.iter() {
                    // Convert input items from stored input (stored as a JSON array)
                    if let Some(input_arr) = stored.input.as_array() {
                        for item in input_arr {
                            match serde_json::from_value::<ResponseInputOutputItem>(item.clone()) {
                                Ok(input_item) => {
                                    items.push(input_item);
                                }
                                Err(e) => {
                                    warn!(
                                        "Failed to deserialize stored input item: {}. Item: {}",
                                        e, item
                                    );
                                }
                            }
                        }
                    }

                    // Convert output items from stored output (stored as a JSON array)
                    if let Some(output_arr) = stored.output.as_array() {
                        for item in output_arr {
                            match serde_json::from_value::<ResponseInputOutputItem>(item.clone()) {
                                Ok(output_item) => {
                                    items.push(output_item);
                                }
                                Err(e) => {
                                    warn!(
                                        "Failed to deserialize stored output item: {}. Item: {}",
                                        e, item
                                    );
                                }
                            }
                        }
                    }
                }
                conversation_items = Some(items);
                modified_request.previous_response_id = None;
            }
            Err(e) => {
                warn!(
                    "Failed to load previous response chain for {}: {}",
                    prev_id_str, e
                );
            }
        }
    }

    // Handle conversation by loading conversation history
    if let Some(ref conv_id_str) = request.conversation {
        let conv_id = ConversationId::from(conv_id_str.as_str());

        // Check if conversation exists - return error if not found
        let conversation = ctx
            .conversation_storage
            .get_conversation(&conv_id)
            .await
            .map_err(|e| error::internal_error(format!("Failed to check conversation: {}", e)))?;

        if conversation.is_none() {
            return Err(error::not_found(format!(
                "Conversation '{}' not found. Please create the conversation first using the conversations API.",
                conv_id_str
            )));
        }

        // Load conversation history
        const MAX_CONVERSATION_HISTORY_ITEMS: usize = 100;
        let params = data_connector::ListParams {
            limit: MAX_CONVERSATION_HISTORY_ITEMS,
            order: data_connector::SortOrder::Asc,
            after: None,
        };

        match ctx
            .conversation_item_storage
            .list_items(&conv_id, params)
            .await
        {
            Ok(stored_items) => {
                let mut items: Vec<ResponseInputOutputItem> = Vec::new();
                for item in stored_items.into_iter() {
                    if item.item_type == "message" {
                        if let Ok(content_parts) =
                            serde_json::from_value::<Vec<ResponseContentPart>>(item.content.clone())
                        {
                            items.push(ResponseInputOutputItem::Message {
                                id: item.id.0.clone(),
                                role: item.role.clone().unwrap_or_else(|| "user".to_string()),
                                content: content_parts,
                                status: item.status.clone(),
                            });
                        }
                    }
                }

                // Append current request
                match &modified_request.input {
                    ResponseInput::Text(text) => {
                        items.push(ResponseInputOutputItem::Message {
                            id: format!("msg_u_{}", conv_id.0),
                            role: "user".to_string(),
                            content: vec![ResponseContentPart::InputText { text: text.clone() }],
                            status: Some("completed".to_string()),
                        });
                    }
                    ResponseInput::Items(current_items) => {
                        // Process all item types, converting SimpleInputMessage to Message
                        for item in current_items.iter() {
                            let normalized = responses::normalize_input_item(item);
                            items.push(normalized);
                        }
                    }
                }

                modified_request.input = ResponseInput::Items(items);
            }
            Err(e) => {
                warn!("Failed to load conversation history: {}", e);
            }
        }
    }

    // If we have conversation_items from previous_response_id, merge them
    if let Some(mut items) = conversation_items {
        // Append current request
        match &modified_request.input {
            ResponseInput::Text(text) => {
                items.push(ResponseInputOutputItem::Message {
                    id: format!(
                        "msg_u_{}",
                        request
                            .previous_response_id
                            .as_deref()
                            .unwrap_or("new")
                    ),
                    role: "user".to_string(),
                    content: vec![ResponseContentPart::InputText { text: text.clone() }],
                    status: Some("completed".to_string()),
                });
            }
            ResponseInput::Items(current_items) => {
                // Process all item types, converting SimpleInputMessage to Message
                for item in current_items.iter() {
                    let normalized = responses::normalize_input_item(item);
                    items.push(normalized);
                }
            }
        }

        modified_request.input = ResponseInput::Items(items);
    }

    Ok(modified_request)
}