//! Handler functions for /v1/responses endpoints
//!
//! # Public API
//!
//! - `route_responses()` - POST /v1/responses (main entry point)
//! - `get_response_impl()` - GET /v1/responses/{response_id}
//! - `cancel_response_impl()` - POST /v1/responses/{response_id}/cancel
//!
//! # Architecture
//!
//! This module orchestrates all request handling for the /v1/responses endpoint.
//! It supports two execution modes:
//!
//! 1. **Synchronous** - Returns complete response immediately
//! 2. **Streaming** - Returns SSE stream with real-time events
//!
//! Note: Background mode is no longer supported. Requests with background=true
//! will be rejected with a 400 error.
//!
//! # Request Flow
//!
//! ```text
//! route_responses()
//!   ├─► route_responses_sync()       → route_responses_internal()
//!   └─► route_responses_streaming()  → convert_chat_stream_to_responses_stream()
//!
//! route_responses_internal()
//!   ├─► load_conversation_history()
//!   ├─► execute_tool_loop() (if MCP tools)
//!   │   └─► pipeline.execute_chat_for_responses() [loop]
//!   └─► execute_without_mcp() (if no MCP tools)
//!       └─► pipeline.execute_chat_for_responses()
//! ```
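//!
//! # Streaming Output (illustrative)
//!
//! A request with `"stream": true` yields an SSE stream roughly shaped like the
//! sketch below. Event names and payloads here are illustrative only; the exact
//! shapes are owned by `ResponseStreamEventEmitter`:
//!
//! ```text
//! data: {"type":"response.created", ...}
//! data: {"type":"response.output_text.delta","delta":"Hel", ...}
//! data: {"type":"response.output_text.delta","delta":"lo", ...}
//! data: {"type":"response.completed","response":{..., "usage":{...}}}
//! data: [DONE]
//! ```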

use std::sync::Arc;

use axum::{
    body::Body,
    http::{self, StatusCode},
    response::{IntoResponse, Response},
};
use bytes::Bytes;
use futures_util::StreamExt;
use serde_json::json;
use tokio::sync::mpsc;
use tracing::{debug, warn};
use uuid::Uuid;
use validator::Validate;

use super::{
    conversions,
    tool_loop::{execute_tool_loop, execute_tool_loop_streaming},
};
use crate::{
    data_connector::{
        self, ConversationId, ConversationItemStorage, ConversationStorage, ResponseId,
        ResponseStorage,
    },
    protocols::{
        chat::{self, ChatCompletionStreamResponse},
        common,
        responses::{
            self, ResponseContentPart, ResponseInput, ResponseInputOutputItem, ResponseOutputItem,
            ResponseReasoningContent, ResponseStatus, ResponsesRequest, ResponsesResponse,
            ResponsesUsage,
        },
    },
    routers::{
        grpc::{
            common::responses::{
                build_sse_response, ensure_mcp_connection, streaming::ResponseStreamEventEmitter,
            },
            error,
        },
        openai::conversations::persist_conversation_items,
    },
};

/// Main handler for POST /v1/responses
///
/// Validates the request, determines the execution mode (sync or streaming), and delegates
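///
/// A hypothetical call site (argument names assumed from this signature):
///
/// ```rust,ignore
/// let response = route_responses(&ctx, Arc::new(request), Some(headers), None).await;
/// ```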
pub async fn route_responses(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
) -> Response {
    // 1. Validate request (includes conversation ID format)
    if let Err(validation_errors) = request.validate() {
        // Extract the first error message for conversation field
        let error_message = validation_errors
            .field_errors()
            .get("conversation")
            .and_then(|errors| errors.first())
            .and_then(|error| error.message.as_ref())
            .map(|msg| msg.to_string())
            .unwrap_or_else(|| "Invalid request parameters".to_string());

        return (
            StatusCode::BAD_REQUEST,
            axum::Json(json!({
                "error": {
                    "message": error_message,
                    "type": "invalid_request_error",
                    "param": "conversation",
                    "code": "invalid_value"
                }
            })),
        )
            .into_response();
    }

    // 2. Validate mutually exclusive parameters
    if request.previous_response_id.is_some() && request.conversation.is_some() {
        return (
            StatusCode::BAD_REQUEST,
            axum::Json(json!({
                "error": {
                    "message": "Mutually exclusive parameters. Ensure you are only providing one of: 'previous_response_id' or 'conversation'.",
                    "type": "invalid_request_error",
                    "param": serde_json::Value::Null,
                    "code": "mutually_exclusive_parameters"
                }
            })),
        )
            .into_response();
    }

    // 3. Reject background mode (no longer supported)
    let is_background = request.background.unwrap_or(false);
    if is_background {
        return (
            StatusCode::BAD_REQUEST,
            axum::Json(json!({
                "error": {
                    "message": "Background mode is not supported. Please set 'background' to false or omit it.",
                    "type": "invalid_request_error",
                    "param": "background",
                    "code": "unsupported_parameter"
                }
            })),
        )
            .into_response();
    }

    // 4. Route based on execution mode
    let is_streaming = request.stream.unwrap_or(false);
    if is_streaming {
        route_responses_streaming(ctx, request, headers, model_id).await
    } else {
        // Generate response ID for synchronous execution
        // TODO: we may remove this when we have builder pattern for responses
        let response_id = Some(format!("resp_{}", Uuid::new_v4()));
        route_responses_sync(ctx, request, headers, model_id, response_id).await
    }
}

// ============================================================================
// Synchronous Execution
// ============================================================================

/// Execute synchronous responses request
///
/// This is the core execution path that:
/// 1. Loads conversation history / response chain
/// 2. Converts to ChatCompletionRequest
/// 3. Executes chat pipeline
/// 4. Converts back to ResponsesResponse
/// 5. Persists to storage
async fn route_responses_sync(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    response_id: Option<String>,
) -> Response {
    match route_responses_internal(ctx, request, headers, model_id, response_id).await {
        Ok(responses_response) => axum::Json(responses_response).into_response(),
        Err(response) => response, // Already a Response with proper status code
    }
}

/// Internal implementation that returns a Result so errors propagate to callers as ready-made HTTP responses
async fn route_responses_internal(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    response_id: Option<String>,
) -> Result<ResponsesResponse, Response> {
    // 1. Load conversation history and build modified request
    let modified_request = load_conversation_history(ctx, &request).await?;

    // 2. Ensure the MCP connection and determine whether MCP tools are present
    let has_mcp_tools = ensure_mcp_connection(&ctx.mcp_manager, request.tools.as_deref()).await?;

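    // 3. Execute: run the MCP tool loop when MCP tools are present, otherwise
    //    go straight through the chat pipeline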
    let responses_response = if has_mcp_tools {
        debug!("MCP tools detected, using tool loop");

        // Execute with MCP tool loop
        execute_tool_loop(
            ctx,
            modified_request,
            &request,
            headers,
            model_id,
            response_id.clone(),
        )
        .await?
    } else {
        // No MCP tools - execute without MCP (may have function tools or no tools)
        execute_without_mcp(
            ctx,
            &modified_request,
            &request,
            headers,
            model_id,
            response_id.clone(),
        )
        .await?
    };

    // 4. Persist the response to storage if store=true
    if request.store.unwrap_or(true) {
        if let Ok(response_json) = serde_json::to_value(&responses_response) {
            if let Err(e) = persist_conversation_items(
                ctx.conversation_storage.clone(),
                ctx.conversation_item_storage.clone(),
                ctx.response_storage.clone(),
                &response_json,
                &request,
            )
            .await
            {
                warn!("Failed to persist response: {}", e);
            }
        }
    }

    Ok(responses_response)
}

/// Execute streaming responses request
async fn route_responses_streaming(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
) -> Response {
    // 1. Load conversation history
    let modified_request = match load_conversation_history(ctx, &request).await {
        Ok(req) => req,
        Err(response) => return response, // Already a Response with proper status code
    };

    // 2. Ensure the MCP connection and determine whether MCP tools are present
    let has_mcp_tools =
        match ensure_mcp_connection(&ctx.mcp_manager, request.tools.as_deref()).await {
            Ok(has_mcp) => has_mcp,
            Err(response) => return response,
        };

    if has_mcp_tools {
        debug!("MCP tools detected in streaming mode, using streaming tool loop");

        return execute_tool_loop_streaming(ctx, modified_request, &request, headers, model_id)
            .await;
    }

    // 3. Convert ResponsesRequest → ChatCompletionRequest
    let chat_request = match conversions::responses_to_chat(&modified_request) {
        Ok(req) => Arc::new(req),
        Err(e) => {
            return (
                StatusCode::BAD_REQUEST,
                axum::Json(json!({
                    "error": {
                        "message": format!("Failed to convert request: {}", e),
                        "type": "invalid_request_error"
                    }
                })),
            )
                .into_response();
        }
    };

    // 4. Execute chat pipeline and convert streaming format (no MCP tools)
    convert_chat_stream_to_responses_stream(ctx, chat_request, headers, model_id, &request).await
}

/// Convert chat streaming response to responses streaming format
///
/// This function:
/// 1. Gets chat SSE stream from pipeline
/// 2. Intercepts and parses each SSE event
/// 3. Converts ChatCompletionStreamResponse → ResponsesResponse delta
/// 4. Accumulates response state for final persistence
/// 5. Emits transformed SSE events in responses format
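///
/// As a rough sketch (payloads abbreviated), an upstream chat chunk such as
///
/// ```text
/// data: {"id":"chatcmpl-1","choices":[{"delta":{"content":"Hi"}}], ...}
/// ```
///
/// is re-emitted in responses format, for example
///
/// ```text
/// data: {"type":"response.output_text.delta","delta":"Hi", ...}
/// ```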
async fn convert_chat_stream_to_responses_stream(
    ctx: &super::context::ResponsesContext,
    chat_request: Arc<chat::ChatCompletionRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    original_request: &ResponsesRequest,
) -> Response {
    debug!("Converting chat SSE stream to responses SSE format");

    // Get chat streaming response
    let chat_response = ctx
        .pipeline
        .execute_chat(
            chat_request.clone(),
            headers,
            model_id,
            ctx.components.clone(),
        )
        .await;

    // Extract body from chat response
    let (_parts, body) = chat_response.into_parts();

    // Create channel for transformed SSE events
    let (tx, rx) = mpsc::unbounded_channel::<Result<Bytes, std::io::Error>>();

    // Spawn background task to transform stream
    let original_request_clone = original_request.clone();
    let chat_request_clone = chat_request.clone();
    let response_storage = ctx.response_storage.clone();
    let conversation_storage = ctx.conversation_storage.clone();
    let conversation_item_storage = ctx.conversation_item_storage.clone();

    tokio::spawn(async move {
        if let Err(e) = process_and_transform_sse_stream(
            body,
            original_request_clone,
            chat_request_clone,
            response_storage,
            conversation_storage,
            conversation_item_storage,
            tx.clone(),
        )
        .await
        {
            warn!("Error transforming SSE stream: {}", e);
            let error_event = json!({
                "error": {
                    "message": e,
                    "type": "stream_error"
                }
            });
            let _ = tx.send(Ok(Bytes::from(format!("data: {}\n\n", error_event))));
        }

        // Send final [DONE] event
        let _ = tx.send(Ok(Bytes::from("data: [DONE]\n\n")));
    });

    // Build SSE response with transformed stream
    build_sse_response(rx)
}

/// Process chat SSE stream and transform to responses format
async fn process_and_transform_sse_stream(
    body: Body,
    original_request: ResponsesRequest,
    _chat_request: Arc<chat::ChatCompletionRequest>,
    response_storage: Arc<dyn ResponseStorage>,
    conversation_storage: Arc<dyn ConversationStorage>,
    conversation_item_storage: Arc<dyn ConversationItemStorage>,
    tx: mpsc::UnboundedSender<Result<Bytes, std::io::Error>>,
) -> Result<(), String> {
    // Create accumulator for final response
    let mut accumulator = StreamingResponseAccumulator::new(&original_request);

    // Create event emitter for OpenAI-compatible streaming
    let response_id = format!("resp_{}", Uuid::new_v4());
    let model = original_request.model.clone();
    let created_at = chrono::Utc::now().timestamp() as u64;
    let mut event_emitter = ResponseStreamEventEmitter::new(response_id, model, created_at);

    // Convert body to data stream
    let mut stream = body.into_data_stream();

    // Process stream chunks (each chunk is a complete SSE event)
    while let Some(chunk_result) = stream.next().await {
        let chunk = chunk_result.map_err(|e| format!("Stream read error: {}", e))?;

        // Convert chunk to string
        let event_str = String::from_utf8_lossy(&chunk);
        let event = event_str.trim();

        // Check for end of stream
        if event == "data: [DONE]" {
            break;
        }

        // Parse SSE event (format: "data: {...}\n\n" or "data: {...}")
        if let Some(json_str) = event.strip_prefix("data: ") {
            let json_str = json_str.trim();

            // Try to parse as ChatCompletionStreamResponse
            match serde_json::from_str::<ChatCompletionStreamResponse>(json_str) {
                Ok(chat_chunk) => {
                    // Update accumulator
                    accumulator.process_chunk(&chat_chunk);

                    // Process chunk through event emitter (emits proper OpenAI events)
                    event_emitter.process_chunk(&chat_chunk, &tx)?;
                }
                Err(_) => {
                    // Not a valid chat chunk - might be error event, pass through
                    debug!("Non-chunk SSE event, passing through: {}", event);
                    if tx.send(Ok(Bytes::from(format!("{}\n\n", event)))).is_err() {
                        return Err("Client disconnected".to_string());
                    }
                }
            }
        }
    }

    // Emit final response.completed event with accumulated usage
    let usage_json = accumulator.usage.as_ref().map(|u| {
        let mut usage_obj = json!({
            "prompt_tokens": u.prompt_tokens,
            "completion_tokens": u.completion_tokens,
            "total_tokens": u.total_tokens
        });

        // Include reasoning_tokens if present
        if let Some(details) = &u.completion_tokens_details {
            if let Some(reasoning_tokens) = details.reasoning_tokens {
                usage_obj["completion_tokens_details"] = json!({
                    "reasoning_tokens": reasoning_tokens
                });
            }
        }

        usage_obj
    });
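    // Resulting shape (token counts are hypothetical):
    // {"prompt_tokens":12,"completion_tokens":34,"total_tokens":46,
    //  "completion_tokens_details":{"reasoning_tokens":8}}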

    let completed_event = event_emitter.emit_completed(usage_json.as_ref());
    event_emitter.send_event(&completed_event, &tx)?;

    // Finalize and persist accumulated response
    if original_request.store.unwrap_or(true) {
        let final_response = accumulator.finalize();

        if let Ok(response_json) = serde_json::to_value(&final_response) {
            if let Err(e) = persist_conversation_items(
                conversation_storage.clone(),
                conversation_item_storage.clone(),
                response_storage.clone(),
                &response_json,
                &original_request,
            )
            .await
            {
                warn!("Failed to persist streaming response: {}", e);
            } else {
                debug!("Persisted streaming response: {}", final_response.id);
            }
        }
    }

    Ok(())
}

/// Response accumulator for streaming responses
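///
/// Illustrative accumulation (deltas concatenate in arrival order; usage is
/// taken from the last chunk that carries it):
///
/// ```text
/// chunk 1: delta.content = "Hel"  → content_buffer = "Hel"
/// chunk 2: delta.content = "lo"   → content_buffer = "Hello"
/// chunk 3: finish_reason = "stop" → status becomes Completed on finalize()
/// ```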
struct StreamingResponseAccumulator {
    // Response metadata
    response_id: String,
    model: String,
    created_at: i64,

    // Accumulated content
    content_buffer: String,
    reasoning_buffer: String,
    tool_calls: Vec<ResponseOutputItem>,

    // Completion state
    finish_reason: Option<String>,
    usage: Option<common::Usage>,

    // Original request for final response construction
    original_request: ResponsesRequest,
}

impl StreamingResponseAccumulator {
    fn new(original_request: &ResponsesRequest) -> Self {
        Self {
            response_id: String::new(),
            model: String::new(),
            created_at: 0,
            content_buffer: String::new(),
            reasoning_buffer: String::new(),
            tool_calls: Vec::new(),
            finish_reason: None,
            usage: None,
            original_request: original_request.clone(),
        }
    }

    fn process_chunk(&mut self, chunk: &ChatCompletionStreamResponse) {
        // Initialize metadata on first chunk
        if self.response_id.is_empty() {
            self.response_id = chunk.id.clone();
            self.model = chunk.model.clone();
            self.created_at = chunk.created as i64;
        }

        // Process first choice (responses API doesn't support n>1)
        if let Some(choice) = chunk.choices.first() {
            // Accumulate content
            if let Some(content) = &choice.delta.content {
                self.content_buffer.push_str(content);
            }

            // Accumulate reasoning
            if let Some(reasoning) = &choice.delta.reasoning_content {
                self.reasoning_buffer.push_str(reasoning);
            }

            // Process tool call deltas
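            // Arguments typically arrive fragmented across chunks, keyed by
            // `index`. Illustrative: {"arguments":"{\"ci"} followed by
            // {"arguments":"ty\":\"Paris\"}"} concatenates to {"city":"Paris"}.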
            if let Some(tool_call_deltas) = &choice.delta.tool_calls {
                for delta in tool_call_deltas {
                    // Use index directly (it's a u32, not Option<u32>)
                    let index = delta.index as usize;

                    // Ensure we have enough tool calls
                    while self.tool_calls.len() <= index {
                        self.tool_calls.push(ResponseOutputItem::FunctionToolCall {
                            id: String::new(),
                            call_id: String::new(),
                            name: String::new(),
                            arguments: String::new(),
                            output: None,
                            status: "in_progress".to_string(),
                        });
                    }

                    // Update the tool call at this index
                    if let ResponseOutputItem::FunctionToolCall {
                        id,
                        name,
                        arguments,
                        ..
                    } = &mut self.tool_calls[index]
                    {
                        if let Some(delta_id) = &delta.id {
                            id.push_str(delta_id);
                        }
                        if let Some(function) = &delta.function {
                            if let Some(delta_name) = &function.name {
                                name.push_str(delta_name);
                            }
                            if let Some(delta_args) = &function.arguments {
                                arguments.push_str(delta_args);
                            }
                        }
                    }
                }
            }

            // Update finish reason
            if let Some(reason) = &choice.finish_reason {
                self.finish_reason = Some(reason.clone());
            }
        }

        // Update usage
        if let Some(usage) = &chunk.usage {
            self.usage = Some(usage.clone());
        }
    }

    fn finalize(self) -> ResponsesResponse {
        let mut output: Vec<ResponseOutputItem> = Vec::new();

        // Add message content if present
        if !self.content_buffer.is_empty() {
            output.push(ResponseOutputItem::Message {
                id: format!("msg_{}", self.response_id),
                role: "assistant".to_string(),
                content: vec![ResponseContentPart::OutputText {
                    text: self.content_buffer,
                    annotations: vec![],
                    logprobs: None,
                }],
                status: "completed".to_string(),
            });
        }

        // Add reasoning if present
        if !self.reasoning_buffer.is_empty() {
            output.push(ResponseOutputItem::Reasoning {
                id: format!("reasoning_{}", self.response_id),
                summary: vec![],
                content: vec![ResponseReasoningContent::ReasoningText {
                    text: self.reasoning_buffer,
                }],
                status: Some("completed".to_string()),
            });
        }

        // Add tool calls
        output.extend(self.tool_calls);

        // Determine final status
        let status = match self.finish_reason.as_deref() {
            Some("stop") | Some("length") => ResponseStatus::Completed,
            Some("tool_calls") => ResponseStatus::InProgress,
            Some("failed") | Some("error") => ResponseStatus::Failed,
            _ => ResponseStatus::Completed,
        };

        // Convert usage
        let usage = self.usage.as_ref().map(|u| {
            let usage_info = common::UsageInfo {
                prompt_tokens: u.prompt_tokens,
                completion_tokens: u.completion_tokens,
                total_tokens: u.total_tokens,
                reasoning_tokens: u
                    .completion_tokens_details
                    .as_ref()
                    .and_then(|d| d.reasoning_tokens),
                prompt_tokens_details: None,
            };
            ResponsesUsage::Classic(usage_info)
        });

        ResponsesResponse {
            id: self.response_id,
            object: "response".to_string(),
            created_at: self.created_at,
            status,
            error: None,
            incomplete_details: None,
            instructions: self.original_request.instructions.clone(),
            max_output_tokens: self.original_request.max_output_tokens,
            model: self.model,
            output,
            parallel_tool_calls: self.original_request.parallel_tool_calls.unwrap_or(true),
            previous_response_id: self.original_request.previous_response_id.clone(),
            reasoning: None,
            store: self.original_request.store.unwrap_or(true),
            temperature: self.original_request.temperature,
            text: None,
            tool_choice: "auto".to_string(),
            tools: self.original_request.tools.clone().unwrap_or_default(),
            top_p: self.original_request.top_p,
            truncation: None,
            usage,
            user: None,
            safety_identifier: self.original_request.user.clone(),
            metadata: self.original_request.metadata.clone().unwrap_or_default(),
        }
    }
}

// ============================================================================
// Helper Functions
// ============================================================================

/// Execute request without MCP tool loop (simple pipeline execution)
async fn execute_without_mcp(
    ctx: &super::context::ResponsesContext,
    modified_request: &ResponsesRequest,
    original_request: &ResponsesRequest,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    response_id: Option<String>,
) -> Result<ResponsesResponse, Response> {
    // Convert ResponsesRequest → ChatCompletionRequest
    let chat_request = conversions::responses_to_chat(modified_request)
        .map_err(|e| error::bad_request(format!("Failed to convert request: {}", e)))?;

    // Execute chat pipeline (errors already have proper HTTP status codes)
    let chat_response = ctx
        .pipeline
        .execute_chat_for_responses(
            Arc::new(chat_request),
            headers,
            model_id,
            ctx.components.clone(),
        )
        .await?; // Preserve the Response error as-is

    // Convert ChatCompletionResponse → ResponsesResponse
    conversions::chat_to_responses(&chat_response, original_request, response_id)
        .map_err(|e| error::internal_error(format!("Failed to convert to responses format: {}", e)))
}

/// Load conversation history and response chains, returning modified request
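///
/// The effective model input is assembled in this order (sketch; the two
/// history sources are mutually exclusive, enforced in `route_responses`):
///
/// ```text
/// [items replayed from the previous_response_id chain]
///   or [messages loaded from the conversation]
/// + [items from the current request's input]
/// ```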
async fn load_conversation_history(
    ctx: &super::context::ResponsesContext,
    request: &ResponsesRequest,
) -> Result<ResponsesRequest, Response> {
    let mut modified_request = request.clone();
    let mut conversation_items: Option<Vec<ResponseInputOutputItem>> = None;

    // Handle previous_response_id by loading response chain
    if let Some(ref prev_id_str) = modified_request.previous_response_id {
        let prev_id = ResponseId::from(prev_id_str.as_str());
        match ctx
            .response_storage
            .get_response_chain(&prev_id, None)
            .await
        {
            Ok(chain) => {
                let mut items = Vec::new();
                for stored in chain.responses.iter() {
                    // Convert input items from stored input (which is now a JSON array)
                    if let Some(input_arr) = stored.input.as_array() {
                        for item in input_arr {
                            match serde_json::from_value::<ResponseInputOutputItem>(item.clone()) {
                                Ok(input_item) => {
                                    items.push(input_item);
                                }
                                Err(e) => {
                                    warn!(
                                        "Failed to deserialize stored input item: {}. Item: {}",
                                        e, item
                                    );
                                }
                            }
                        }
                    }

                    // Convert output items from stored output (which is now a JSON array)
                    if let Some(output_arr) = stored.output.as_array() {
                        for item in output_arr {
                            match serde_json::from_value::<ResponseInputOutputItem>(item.clone()) {
                                Ok(output_item) => {
                                    items.push(output_item);
                                }
                                Err(e) => {
                                    warn!(
                                        "Failed to deserialize stored output item: {}. Item: {}",
                                        e, item
                                    );
                                }
                            }
                        }
                    }
                }
                conversation_items = Some(items);
                modified_request.previous_response_id = None;
            }
            Err(e) => {
                warn!(
                    "Failed to load previous response chain for {}: {}",
                    prev_id_str, e
                );
            }
        }
    }

    // Handle conversation by loading conversation history
    if let Some(ref conv_id_str) = request.conversation {
        let conv_id = ConversationId::from(conv_id_str.as_str());

        // Check if conversation exists - return error if not found
        let conversation = ctx
            .conversation_storage
            .get_conversation(&conv_id)
            .await
            .map_err(|e| error::internal_error(format!("Failed to check conversation: {}", e)))?;

        if conversation.is_none() {
            return Err(error::not_found(format!(
                "Conversation '{}' not found. Please create the conversation first using the conversations API.",
                conv_id_str
            )));
        }

        // Load conversation history
        const MAX_CONVERSATION_HISTORY_ITEMS: usize = 100;
        let params = data_connector::ListParams {
            limit: MAX_CONVERSATION_HISTORY_ITEMS,
            order: data_connector::SortOrder::Asc,
            after: None,
        };

        match ctx
            .conversation_item_storage
            .list_items(&conv_id, params)
            .await
        {
            Ok(stored_items) => {
                let mut items: Vec<ResponseInputOutputItem> = Vec::new();
                for item in stored_items.into_iter() {
                    if item.item_type == "message" {
                        if let Ok(content_parts) =
                            serde_json::from_value::<Vec<ResponseContentPart>>(item.content.clone())
                        {
                            items.push(ResponseInputOutputItem::Message {
                                id: item.id.0.clone(),
                                role: item.role.clone().unwrap_or_else(|| "user".to_string()),
                                content: content_parts,
                                status: item.status.clone(),
                            });
                        }
                    }
                }

                // Append current request
                match &modified_request.input {
                    ResponseInput::Text(text) => {
                        items.push(ResponseInputOutputItem::Message {
                            id: format!("msg_u_{}", conv_id.0),
                            role: "user".to_string(),
                            content: vec![ResponseContentPart::InputText { text: text.clone() }],
                            status: Some("completed".to_string()),
                        });
                    }
                    ResponseInput::Items(current_items) => {
                        // Process all item types, converting SimpleInputMessage to Message
                        for item in current_items.iter() {
                            let normalized = responses::normalize_input_item(item);
                            items.push(normalized);
                        }
                    }
                }

                modified_request.input = ResponseInput::Items(items);
            }
            Err(e) => {
                warn!("Failed to load conversation history: {}", e);
            }
        }
    }

    // If we have conversation_items from previous_response_id, merge them
    if let Some(mut items) = conversation_items {
        // Append current request
        match &modified_request.input {
            ResponseInput::Text(text) => {
                items.push(ResponseInputOutputItem::Message {
                    id: format!(
                        "msg_u_{}",
                        request.previous_response_id.as_deref().unwrap_or("new")
                    ),
                    role: "user".to_string(),
                    content: vec![ResponseContentPart::InputText { text: text.clone() }],
                    status: Some("completed".to_string()),
                });
            }
            ResponseInput::Items(current_items) => {
                // Process all item types, converting SimpleInputMessage to Message
                for item in current_items.iter() {
                    let normalized = responses::normalize_input_item(item);
                    items.push(normalized);
                }
            }
        }

        modified_request.input = ResponseInput::Items(items);
    }

    Ok(modified_request)
}