//! Handler functions for /v1/responses endpoints
//!
//! # Public API
//!
//! - `route_responses()` - POST /v1/responses (main entry point)
//! - `get_response_impl()` - GET /v1/responses/{response_id}
//! - `cancel_response_impl()` - POST /v1/responses/{response_id}/cancel
//!
//! # Architecture
//!
//! This module orchestrates all request handling for the /v1/responses endpoint.
//! It supports two execution modes:
//!
//! 1. **Synchronous** - Returns complete response immediately
//! 2. **Streaming** - Returns SSE stream with real-time events
//!
//! Note: Background mode is no longer supported. Requests with background=true
//! will be rejected with a 400 error.
//!
//! # Request Flow
//!
//! ```text
//! route_responses()
//!   ├─► route_responses_sync()       → route_responses_internal()
//!   └─► route_responses_streaming()  → convert_chat_stream_to_responses_stream()
//!
//! route_responses_internal()
//!   ├─► load_conversation_history()
//!   ├─► execute_tool_loop() (if MCP tools)
//!   │   └─► pipeline.execute_chat_for_responses() [loop]
//!   └─► execute_without_mcp() (if no MCP tools)
//!       └─► pipeline.execute_chat_for_responses()
//! ```
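//!
//! # Example Requests
//!
//! Illustrative request bodies for each mode (a sketch; the accepted fields
//! are defined by `ResponsesRequest`, and `"my-model"` is a placeholder):
//!
//! ```text
//! // Synchronous: returns a complete ResponsesResponse body
//! { "model": "my-model", "input": "Hello", "stream": false }
//!
//! // Streaming: returns an SSE stream of response events
//! { "model": "my-model", "input": "Hello", "stream": true }
//!
//! // Rejected with 400: background mode is no longer supported
//! { "model": "my-model", "input": "Hello", "background": true }
//! ```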

use std::sync::Arc;

use axum::{
    body::Body,
    http::{self, StatusCode},
    response::{IntoResponse, Response},
};
use bytes::Bytes;
use futures_util::StreamExt;
use serde_json::json;
use tokio::sync::mpsc;
use tracing::{debug, warn};
use uuid::Uuid;
use validator::Validate;

use super::{
    conversions,
    tool_loop::{execute_tool_loop, execute_tool_loop_streaming},
};
use crate::{
    data_connector::{
        self, ConversationId, ConversationItemStorage, ConversationStorage, ResponseId,
        ResponseStorage,
    },
    protocols::{
        chat::{self, ChatCompletionStreamResponse},
        common::{self, ToolChoice},
        responses::{
            self, ResponseContentPart, ResponseInput, ResponseInputOutputItem, ResponseOutputItem,
            ResponseReasoningContent, ResponseStatus, ResponsesRequest, ResponsesResponse,
            ResponsesUsage,
        },
    },
    routers::grpc::{
        common::responses::{
            build_sse_response, ensure_mcp_connection, persist_response_if_needed,
            streaming::ResponseStreamEventEmitter,
        },
        error,
    },
};

/// Main handler for POST /v1/responses
///
/// Validates the request, determines the execution mode (sync/streaming), and delegates
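///
/// Illustrative 400 cases enforced here (request shapes, not exact payloads):
///
/// ```text
/// { "previous_response_id": "...", "conversation": "..." }  // mutually exclusive
/// { "background": true }                                    // background unsupported
/// ```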
pub async fn route_responses(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
) -> Response {
    // 1. Validate request (includes conversation ID format)
    if let Err(validation_errors) = request.validate() {
        // Extract the first error message for conversation field
        let error_message = validation_errors
            .field_errors()
            .get("conversation")
            .and_then(|errors| errors.first())
            .and_then(|error| error.message.as_ref())
            .map(|msg| msg.to_string())
            .unwrap_or_else(|| "Invalid request parameters".to_string());

        return (
            StatusCode::BAD_REQUEST,
            axum::Json(json!({
                "error": {
                    "message": error_message,
                    "type": "invalid_request_error",
                    "param": "conversation",
                    "code": "invalid_value"
                }
            })),
        )
            .into_response();
    }

    // 2. Validate mutually exclusive parameters
    if request.previous_response_id.is_some() && request.conversation.is_some() {
        return (
            StatusCode::BAD_REQUEST,
            axum::Json(json!({
                "error": {
                    "message": "Mutually exclusive parameters. Ensure you are only providing one of: 'previous_response_id' or 'conversation'.",
                    "type": "invalid_request_error",
                    "param": serde_json::Value::Null,
                    "code": "mutually_exclusive_parameters"
                }
            })),
        )
            .into_response();
    }

    // 3. Reject background mode (no longer supported)
    let is_background = request.background.unwrap_or(false);
    if is_background {
        return (
            StatusCode::BAD_REQUEST,
            axum::Json(json!({
                "error": {
                    "message": "Background mode is not supported. Please set 'background' to false or omit it.",
                    "type": "invalid_request_error",
                    "param": "background",
                    "code": "unsupported_parameter"
                }
            })),
        )
            .into_response();
    }

    // 4. Route based on execution mode
    let is_streaming = request.stream.unwrap_or(false);
    if is_streaming {
        route_responses_streaming(ctx, request, headers, model_id).await
    } else {
        // Generate response ID for synchronous execution
        // TODO: we may remove this when we have builder pattern for responses
        let response_id = Some(format!("resp_{}", Uuid::new_v4()));
        route_responses_sync(ctx, request, headers, model_id, response_id).await
    }
}

// ============================================================================
// Synchronous Execution
// ============================================================================

/// Execute synchronous responses request
///
/// This is the core execution path that:
/// 1. Loads conversation history / response chain
/// 2. Converts to ChatCompletionRequest
/// 3. Executes chat pipeline
/// 4. Converts back to ResponsesResponse
/// 5. Persists to storage
async fn route_responses_sync(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    response_id: Option<String>,
) -> Response {
    match route_responses_internal(ctx, request, headers, model_id, response_id).await {
        Ok(responses_response) => axum::Json(responses_response).into_response(),
        Err(response) => response, // Already a Response with proper status code
    }
}

/// Internal implementation that returns a Result so callers can handle errors uniformly
async fn route_responses_internal(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    response_id: Option<String>,
) -> Result<ResponsesResponse, Response> {
    // 1. Load conversation history and build modified request
    let modified_request = load_conversation_history(ctx, &request).await?;

    // 2. Check MCP connection and get whether MCP tools are present
    let has_mcp_tools = ensure_mcp_connection(&ctx.mcp_manager, request.tools.as_deref()).await?;

    // 3. Execute via the MCP tool loop or the plain chat pipeline
    let responses_response = if has_mcp_tools {
        debug!("MCP tools detected, using tool loop");

        // Execute with MCP tool loop
        execute_tool_loop(
            ctx,
            modified_request,
            &request,
            headers,
            model_id,
            response_id.clone(),
        )
        .await?
    } else {
        // No MCP tools - execute without MCP (may have function tools or no tools)
        execute_without_mcp(
            ctx,
            &modified_request,
            &request,
            headers,
            model_id,
            response_id.clone(),
        )
        .await?
    };

    // 4. Persist response to storage if store=true
    persist_response_if_needed(
        ctx.conversation_storage.clone(),
        ctx.conversation_item_storage.clone(),
        ctx.response_storage.clone(),
        &responses_response,
        &request,
    )
    .await;

    Ok(responses_response)
}

/// Execute streaming responses request
async fn route_responses_streaming(
    ctx: &super::context::ResponsesContext,
    request: Arc<ResponsesRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
) -> Response {
    // 1. Load conversation history
    let modified_request = match load_conversation_history(ctx, &request).await {
        Ok(req) => req,
        Err(response) => return response, // Already a Response with proper status code
    };

    // 2. Check MCP connection and get whether MCP tools are present
    let has_mcp_tools =
        match ensure_mcp_connection(&ctx.mcp_manager, request.tools.as_deref()).await {
            Ok(has_mcp) => has_mcp,
            Err(response) => return response,
        };

    if has_mcp_tools {
        debug!("MCP tools detected in streaming mode, using streaming tool loop");

        return execute_tool_loop_streaming(ctx, modified_request, &request, headers, model_id)
            .await;
    }

    // 3. Convert ResponsesRequest → ChatCompletionRequest
    let chat_request = match conversions::responses_to_chat(&modified_request) {
        Ok(req) => Arc::new(req),
        Err(e) => {
            return (
                StatusCode::BAD_REQUEST,
                axum::Json(json!({
                    "error": {
                        "message": format!("Failed to convert request: {}", e),
                        "type": "invalid_request_error"
                    }
                })),
            )
                .into_response();
        }
    };

    // 4. Execute chat pipeline and convert streaming format (no MCP tools)
    convert_chat_stream_to_responses_stream(ctx, chat_request, headers, model_id, &request).await
}

/// Convert chat streaming response to responses streaming format
///
/// This function:
/// 1. Gets chat SSE stream from pipeline
/// 2. Intercepts and parses each SSE event
/// 3. Converts ChatCompletionStreamResponse → ResponsesResponse delta
/// 4. Accumulates response state for final persistence
/// 5. Emits transformed SSE events in responses format
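///
/// Illustrative wire sequence (a sketch; exact payloads are produced by
/// `ResponseStreamEventEmitter`):
///
/// ```text
/// data: {"type":"response.created", ...}
/// data: {"type":"response.in_progress", ...}
/// ... delta events derived from each ChatCompletionStreamResponse chunk ...
/// data: {"type":"response.completed", ...}
/// data: [DONE]
/// ```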
async fn convert_chat_stream_to_responses_stream(
    ctx: &super::context::ResponsesContext,
    chat_request: Arc<chat::ChatCompletionRequest>,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    original_request: &ResponsesRequest,
) -> Response {
    debug!("Converting chat SSE stream to responses SSE format");

    // Get chat streaming response
    let chat_response = ctx
        .pipeline
        .execute_chat(
            chat_request.clone(),
            headers,
            model_id,
            ctx.components.clone(),
        )
        .await;

    // Extract body from chat response
    let (_parts, body) = chat_response.into_parts();

    // Create channel for transformed SSE events
    let (tx, rx) = mpsc::unbounded_channel::<Result<Bytes, std::io::Error>>();

    // Spawn background task to transform stream
    let original_request_clone = original_request.clone();
    let chat_request_clone = chat_request.clone();
    let response_storage = ctx.response_storage.clone();
    let conversation_storage = ctx.conversation_storage.clone();
    let conversation_item_storage = ctx.conversation_item_storage.clone();

    tokio::spawn(async move {
        if let Err(e) = process_and_transform_sse_stream(
            body,
            original_request_clone,
            chat_request_clone,
            response_storage,
            conversation_storage,
            conversation_item_storage,
            tx.clone(),
        )
        .await
        {
            warn!("Error transforming SSE stream: {}", e);
            let error_event = json!({
                "error": {
                    "message": e,
                    "type": "stream_error"
                }
            });
            let _ = tx.send(Ok(Bytes::from(format!("data: {}\n\n", error_event))));
        }

        // Send final [DONE] event
        let _ = tx.send(Ok(Bytes::from("data: [DONE]\n\n")));
    });

    // Build SSE response with transformed stream
    build_sse_response(rx)
}

/// Process chat SSE stream and transform to responses format
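///
/// Each incoming chunk is expected to be a complete SSE frame, e.g.
/// (illustrative):
///
/// ```text
/// data: {"id":"chatcmpl-...","choices":[{"delta":{"content":"Hi"}}],...}
///
/// data: [DONE]
/// ```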
async fn process_and_transform_sse_stream(
    body: Body,
    original_request: ResponsesRequest,
    _chat_request: Arc<chat::ChatCompletionRequest>,
    response_storage: Arc<dyn ResponseStorage>,
    conversation_storage: Arc<dyn ConversationStorage>,
    conversation_item_storage: Arc<dyn ConversationItemStorage>,
    tx: mpsc::UnboundedSender<Result<Bytes, std::io::Error>>,
) -> Result<(), String> {
    // Create accumulator for final response
    let mut accumulator = StreamingResponseAccumulator::new(&original_request);

    // Create event emitter for OpenAI-compatible streaming
    let response_id = format!("resp_{}", Uuid::new_v4());
    let model = original_request.model.clone();
    let created_at = chrono::Utc::now().timestamp() as u64;
    let mut event_emitter = ResponseStreamEventEmitter::new(response_id, model, created_at);
    event_emitter.set_original_request(original_request.clone());

    // Emit initial response.created and response.in_progress events
    let event = event_emitter.emit_created();
    event_emitter
        .send_event(&event, &tx)
        .map_err(|_| "Failed to send response.created event".to_string())?;

    let event = event_emitter.emit_in_progress();
    event_emitter
        .send_event(&event, &tx)
        .map_err(|_| "Failed to send response.in_progress event".to_string())?;

    // Convert body to data stream
    let mut stream = body.into_data_stream();

    // Process stream chunks (each chunk is a complete SSE event)
    while let Some(chunk_result) = stream.next().await {
        let chunk = chunk_result.map_err(|e| format!("Stream read error: {}", e))?;

        // Convert chunk to string
        let event_str = String::from_utf8_lossy(&chunk);
        let event = event_str.trim();

        // Check for end of stream
        if event == "data: [DONE]" {
            break;
        }

        // Parse SSE event (format: "data: {...}\n\n" or "data: {...}")
        if let Some(json_str) = event.strip_prefix("data: ") {
            let json_str = json_str.trim();

            // Try to parse as ChatCompletionStreamResponse
            match serde_json::from_str::<ChatCompletionStreamResponse>(json_str) {
                Ok(chat_chunk) => {
                    // Update accumulator
                    accumulator.process_chunk(&chat_chunk);

                    // Process chunk through event emitter (emits proper OpenAI events)
                    event_emitter.process_chunk(&chat_chunk, &tx)?;
                }
                Err(_) => {
                    // Not a valid chat chunk - might be error event, pass through
                    debug!("Non-chunk SSE event, passing through: {}", event);
                    if tx.send(Ok(Bytes::from(format!("{}\n\n", event)))).is_err() {
                        return Err("Client disconnected".to_string());
                    }
                }
            }
        }
    }

    // Emit final response.completed event with accumulated usage
    let usage_json = accumulator.usage.as_ref().map(|u| {
        let mut usage_obj = json!({
            "input_tokens": u.prompt_tokens,
            "output_tokens": u.completion_tokens,
            "total_tokens": u.total_tokens
        });

        // Include reasoning_tokens if present
        if let Some(details) = &u.completion_tokens_details {
            if let Some(reasoning_tokens) = details.reasoning_tokens {
                usage_obj["output_tokens_details"] = json!({
                    "reasoning_tokens": reasoning_tokens
                });
            }
        }

        usage_obj
    });

    let completed_event = event_emitter.emit_completed(usage_json.as_ref());
    event_emitter.send_event(&completed_event, &tx)?;

    // Finalize and persist accumulated response
    let final_response = accumulator.finalize();
    persist_response_if_needed(
        conversation_storage,
        conversation_item_storage,
        response_storage,
        &final_response,
        &original_request,
    )
    .await;

    Ok(())
}

/// Response accumulator for streaming responses
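///
/// Illustrative usage (a sketch; in this module the chunks arrive from the
/// SSE stream in `process_and_transform_sse_stream`):
///
/// ```text
/// let mut acc = StreamingResponseAccumulator::new(&request);
/// for chunk in &chunks { acc.process_chunk(chunk); }
/// let response: ResponsesResponse = acc.finalize();
/// ```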
struct StreamingResponseAccumulator {
    // Response metadata
    response_id: String,
    model: String,
    created_at: i64,

    // Accumulated content
    content_buffer: String,
    reasoning_buffer: String,
    tool_calls: Vec<ResponseOutputItem>,

    // Completion state
    finish_reason: Option<String>,
    usage: Option<common::Usage>,

    // Original request for final response construction
    original_request: ResponsesRequest,
}

impl StreamingResponseAccumulator {
    fn new(original_request: &ResponsesRequest) -> Self {
        Self {
            response_id: String::new(),
            model: String::new(),
            created_at: 0,
            content_buffer: String::new(),
            reasoning_buffer: String::new(),
            tool_calls: Vec::new(),
            finish_reason: None,
            usage: None,
            original_request: original_request.clone(),
        }
    }

    fn process_chunk(&mut self, chunk: &ChatCompletionStreamResponse) {
        // Initialize metadata on first chunk
        if self.response_id.is_empty() {
            self.response_id = chunk.id.clone();
            self.model = chunk.model.clone();
            self.created_at = chunk.created as i64;
        }

        // Process first choice (responses API doesn't support n>1)
        if let Some(choice) = chunk.choices.first() {
            // Accumulate content
            if let Some(content) = &choice.delta.content {
                self.content_buffer.push_str(content);
            }

            // Accumulate reasoning
            if let Some(reasoning) = &choice.delta.reasoning_content {
                self.reasoning_buffer.push_str(reasoning);
            }

            // Process tool call deltas
            if let Some(tool_call_deltas) = &choice.delta.tool_calls {
                for delta in tool_call_deltas {
                    // Use index directly (it's a u32, not Option<u32>)
                    let index = delta.index as usize;

                    // Ensure we have enough tool calls
                    while self.tool_calls.len() <= index {
                        self.tool_calls.push(ResponseOutputItem::FunctionToolCall {
                            id: String::new(),
                            call_id: String::new(),
                            name: String::new(),
                            arguments: String::new(),
                            output: None,
                            status: "in_progress".to_string(),
                        });
                    }

                    // Update the tool call at this index
                    if let ResponseOutputItem::FunctionToolCall {
                        id,
                        name,
                        arguments,
                        ..
                    } = &mut self.tool_calls[index]
                    {
                        if let Some(delta_id) = &delta.id {
                            id.push_str(delta_id);
                        }
                        if let Some(function) = &delta.function {
                            if let Some(delta_name) = &function.name {
                                name.push_str(delta_name);
                            }
                            if let Some(delta_args) = &function.arguments {
                                arguments.push_str(delta_args);
                            }
                        }
                    }
                }
            }

            // Update finish reason
            if let Some(reason) = &choice.finish_reason {
                self.finish_reason = Some(reason.clone());
            }
        }

        // Update usage
        if let Some(usage) = &chunk.usage {
            self.usage = Some(usage.clone());
        }
    }

    fn finalize(self) -> ResponsesResponse {
        let mut output: Vec<ResponseOutputItem> = Vec::new();

        // Add message content if present
        if !self.content_buffer.is_empty() {
            output.push(ResponseOutputItem::Message {
                id: format!("msg_{}", self.response_id),
                role: "assistant".to_string(),
                content: vec![ResponseContentPart::OutputText {
                    text: self.content_buffer,
                    annotations: vec![],
                    logprobs: None,
                }],
                status: "completed".to_string(),
            });
        }

        // Add reasoning if present
        if !self.reasoning_buffer.is_empty() {
            output.push(ResponseOutputItem::Reasoning {
                id: format!("reasoning_{}", self.response_id),
                summary: vec![],
                content: vec![ResponseReasoningContent::ReasoningText {
                    text: self.reasoning_buffer,
                }],
                status: Some("completed".to_string()),
            });
        }

        // Add tool calls
        output.extend(self.tool_calls);

        // Determine final status
        let status = match self.finish_reason.as_deref() {
            Some("stop") | Some("length") => ResponseStatus::Completed,
            Some("tool_calls") => ResponseStatus::InProgress,
            Some("failed") | Some("error") => ResponseStatus::Failed,
            _ => ResponseStatus::Completed,
        };

        // Convert usage
        let usage = self.usage.as_ref().map(|u| {
            let usage_info = common::UsageInfo {
                prompt_tokens: u.prompt_tokens,
                completion_tokens: u.completion_tokens,
                total_tokens: u.total_tokens,
                reasoning_tokens: u
                    .completion_tokens_details
                    .as_ref()
                    .and_then(|d| d.reasoning_tokens),
                prompt_tokens_details: None,
            };
            ResponsesUsage::Classic(usage_info)
        });

        ResponsesResponse {
            id: self.response_id,
            object: "response".to_string(),
            created_at: self.created_at,
            status,
            error: None,
            incomplete_details: None,
            instructions: self.original_request.instructions.clone(),
            max_output_tokens: self.original_request.max_output_tokens,
            model: self.model,
            output,
            parallel_tool_calls: self.original_request.parallel_tool_calls.unwrap_or(true),
            previous_response_id: self.original_request.previous_response_id.clone(),
            reasoning: None,
            store: self.original_request.store.unwrap_or(true),
            temperature: self.original_request.temperature,
            text: None,
            tool_choice: ToolChoice::serialize_to_string(&self.original_request.tool_choice),
            tools: self.original_request.tools.clone().unwrap_or_default(),
            top_p: self.original_request.top_p,
            truncation: None,
            usage,
            user: None,
            safety_identifier: self.original_request.user.clone(),
            metadata: self.original_request.metadata.clone().unwrap_or_default(),
        }
    }
}

// ============================================================================
// Helper Functions
// ============================================================================

/// Execute request without MCP tool loop (simple pipeline execution)
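///
/// Flow (a sketch): ResponsesRequest → ChatCompletionRequest →
/// `pipeline.execute_chat_for_responses()` → ResponsesResponse.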
async fn execute_without_mcp(
    ctx: &super::context::ResponsesContext,
    modified_request: &ResponsesRequest,
    original_request: &ResponsesRequest,
    headers: Option<http::HeaderMap>,
    model_id: Option<String>,
    response_id: Option<String>,
) -> Result<ResponsesResponse, Response> {
    // Convert ResponsesRequest → ChatCompletionRequest
    let chat_request = conversions::responses_to_chat(modified_request)
        .map_err(|e| error::bad_request(format!("Failed to convert request: {}", e)))?;

    // Execute chat pipeline (errors already have proper HTTP status codes)
    let chat_response = ctx
        .pipeline
        .execute_chat_for_responses(
            Arc::new(chat_request),
            headers,
            model_id,
            ctx.components.clone(),
        )
        .await?; // Preserve the Response error as-is

    // Convert ChatCompletionResponse → ResponsesResponse
    conversions::chat_to_responses(&chat_response, original_request, response_id)
        .map_err(|e| error::internal_error(format!("Failed to convert to responses format: {}", e)))
}

/// Load conversation history and response chains, returning modified request
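///
/// Illustrative merge (a sketch): with `previous_response_id` set, the stored
/// chain's input/output items are loaded first and the current input is
/// appended, so the pipeline sees one flattened history:
///
/// ```text
/// ResponseInput::Items([chain items..., current request items...])
/// ```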
async fn load_conversation_history(
    ctx: &super::context::ResponsesContext,
    request: &ResponsesRequest,
) -> Result<ResponsesRequest, Response> {
    let mut modified_request = request.clone();
    let mut conversation_items: Option<Vec<ResponseInputOutputItem>> = None;

    // Handle previous_response_id by loading response chain
    if let Some(ref prev_id_str) = modified_request.previous_response_id {
        let prev_id = ResponseId::from(prev_id_str.as_str());
        match ctx
            .response_storage
            .get_response_chain(&prev_id, None)
            .await
        {
            Ok(chain) => {
                let mut items = Vec::new();
                for stored in chain.responses.iter() {
                    // Convert input items from stored input (which is now a JSON array)
                    if let Some(input_arr) = stored.input.as_array() {
                        for item in input_arr {
                            match serde_json::from_value::<ResponseInputOutputItem>(item.clone()) {
                                Ok(input_item) => {
                                    items.push(input_item);
                                }
                                Err(e) => {
                                    warn!(
                                        "Failed to deserialize stored input item: {}. Item: {}",
                                        e, item
                                    );
                                }
                            }
                        }
                    }

                    // Convert output items from stored output (which is now a JSON array)
                    if let Some(output_arr) = stored.output.as_array() {
                        for item in output_arr {
                            match serde_json::from_value::<ResponseInputOutputItem>(item.clone()) {
                                Ok(output_item) => {
                                    items.push(output_item);
                                }
                                Err(e) => {
                                    warn!(
                                        "Failed to deserialize stored output item: {}. Item: {}",
                                        e, item
                                    );
                                }
                            }
                        }
                    }
                }
                conversation_items = Some(items);
                modified_request.previous_response_id = None;
            }
            Err(e) => {
                warn!(
                    "Failed to load previous response chain for {}: {}",
                    prev_id_str, e
                );
            }
        }
    }

    // Handle conversation by loading conversation history
    if let Some(ref conv_id_str) = request.conversation {
        let conv_id = ConversationId::from(conv_id_str.as_str());

        // Check if conversation exists - return error if not found
        let conversation = ctx
            .conversation_storage
            .get_conversation(&conv_id)
            .await
            .map_err(|e| error::internal_error(format!("Failed to check conversation: {}", e)))?;

        if conversation.is_none() {
            return Err(error::not_found(format!(
                "Conversation '{}' not found. Please create the conversation first using the conversations API.",
                conv_id_str
            )));
        }

        // Load conversation history
        const MAX_CONVERSATION_HISTORY_ITEMS: usize = 100;
        let params = data_connector::ListParams {
            limit: MAX_CONVERSATION_HISTORY_ITEMS,
            order: data_connector::SortOrder::Asc,
            after: None,
        };

        match ctx
            .conversation_item_storage
            .list_items(&conv_id, params)
            .await
        {
            Ok(stored_items) => {
                let mut items: Vec<ResponseInputOutputItem> = Vec::new();
                for item in stored_items.into_iter() {
                    if item.item_type == "message" {
                        if let Ok(content_parts) =
                            serde_json::from_value::<Vec<ResponseContentPart>>(item.content.clone())
                        {
                            items.push(ResponseInputOutputItem::Message {
                                id: item.id.0.clone(),
                                role: item.role.clone().unwrap_or_else(|| "user".to_string()),
                                content: content_parts,
                                status: item.status.clone(),
                            });
                        }
                    }
                }

                // Append current request
                match &modified_request.input {
                    ResponseInput::Text(text) => {
                        items.push(ResponseInputOutputItem::Message {
                            id: format!("msg_u_{}", conv_id.0),
                            role: "user".to_string(),
                            content: vec![ResponseContentPart::InputText { text: text.clone() }],
                            status: Some("completed".to_string()),
                        });
                    }
                    ResponseInput::Items(current_items) => {
                        // Process all item types, converting SimpleInputMessage to Message
                        for item in current_items.iter() {
                            let normalized = responses::normalize_input_item(item);
                            items.push(normalized);
                        }
                    }
                }

                modified_request.input = ResponseInput::Items(items);
            }
            Err(e) => {
                warn!("Failed to load conversation history: {}", e);
            }
        }
    }

    // If we have conversation_items from previous_response_id, merge them
    if let Some(mut items) = conversation_items {
        // Append current request
        match &modified_request.input {
            ResponseInput::Text(text) => {
                items.push(ResponseInputOutputItem::Message {
                    id: format!(
                        "msg_u_{}",
                        request
                            .previous_response_id
                            .as_ref()
                            .unwrap_or(&"new".to_string())
                    ),
                    role: "user".to_string(),
                    content: vec![ResponseContentPart::InputText { text: text.clone() }],
                    status: Some("completed".to_string()),
                });
            }
            ResponseInput::Items(current_items) => {
                // Process all item types, converting SimpleInputMessage to Message
                for item in current_items.iter() {
                    let normalized = responses::normalize_input_item(item);
                    items.push(normalized);
                }
            }
        }

        modified_request.input = ResponseInput::Items(items);
    }

    Ok(modified_request)
}