"tests/vscode:/vscode.git/clone" did not exist on "7c2ea23aaf51691620842b16fd38cef32330ccfa"
Unverified commit 5d62b56f authored by Simo Lin, committed by GitHub

[router] complete router oai spec (#8828)

parent 3ae8e3ea
@@ -8,12 +8,116 @@ use sglang_router_rs::openai_api_types::{
};
use sglang_router_rs::routers::request_adapter::{RouteableRequest, ToPdRequest};
/// Create a default GenerateRequest for benchmarks with minimal fields set
fn default_generate_request() -> GenerateRequest {
GenerateRequest {
text: None,
prompt: None,
input_ids: None,
stream: false,
parameters: None,
sampling_params: None,
return_logprob: false,
// SGLang Extensions
lora_path: None,
session_params: None,
return_hidden_states: false,
rid: None,
}
}
/// Create a default ChatCompletionRequest for benchmarks with minimal fields set
fn default_chat_completion_request() -> ChatCompletionRequest {
ChatCompletionRequest {
model: String::new(),
messages: vec![],
max_tokens: None,
max_completion_tokens: None,
temperature: None,
top_p: None,
n: None,
stream: false,
stream_options: None,
stop: None,
presence_penalty: None,
frequency_penalty: None,
logit_bias: None,
logprobs: false,
top_logprobs: None,
user: None,
response_format: None,
seed: None,
tools: None,
tool_choice: None,
parallel_tool_calls: None,
function_call: None,
functions: None,
// SGLang Extensions
top_k: None,
min_p: None,
min_tokens: None,
repetition_penalty: None,
regex: None,
ebnf: None,
stop_token_ids: None,
no_stop_trim: false,
ignore_eos: false,
continue_final_message: false,
skip_special_tokens: true,
// SGLang Extensions
lora_path: None,
session_params: None,
separate_reasoning: true,
stream_reasoning: true,
return_hidden_states: false,
}
}
/// Create a default CompletionRequest for benchmarks with minimal fields set
fn default_completion_request() -> CompletionRequest {
CompletionRequest {
model: String::new(),
prompt: StringOrArray::String(String::new()),
suffix: None,
max_tokens: None,
temperature: None,
top_p: None,
n: None,
stream: false,
stream_options: None,
logprobs: None,
echo: false,
stop: None,
presence_penalty: None,
frequency_penalty: None,
best_of: None,
logit_bias: None,
user: None,
seed: None,
// SGLang Extensions
top_k: None,
min_p: None,
min_tokens: None,
repetition_penalty: None,
regex: None,
ebnf: None,
json_schema: None,
stop_token_ids: None,
no_stop_trim: false,
ignore_eos: false,
skip_special_tokens: true,
// SGLang Extensions
lora_path: None,
session_params: None,
return_hidden_states: false,
other: serde_json::Map::new(),
}
}
// Sample request data for benchmarks
fn create_sample_generate_request() -> GenerateRequest {
GenerateRequest {
text: Some("Write a story about artificial intelligence".to_string()),
input_ids: None,
prompt: None,
parameters: Some(GenerateParameters {
max_new_tokens: Some(100),
temperature: Some(0.8),
@@ -31,8 +135,7 @@ fn create_sample_generate_request() -> GenerateRequest {
repetition_penalty: Some(1.0),
..Default::default()
}),
stream: false,
return_logprob: false,
..default_generate_request()
}
}
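The call sites above now rely on Rust's struct update syntax: each one names only the fields it actually varies and pulls everything else from the shared default helper. A minimal, self-contained sketch of the pattern, using a made-up `Req` type rather than the real request structs:

```rust
// Explicitly named fields win; `..base` fills in every remaining field from `base`.
#[derive(Debug)]
struct Req {
    text: Option<String>,
    stream: bool,
    rid: Option<String>,
}

fn default_req() -> Req {
    Req { text: None, stream: false, rid: None }
}

fn main() {
    let req = Req {
        text: Some("Hi".to_string()),
        ..default_req()
    };
    assert!(!req.stream); // taken from default_req()
    assert!(req.rid.is_none());
    println!("{:?}", req);
}
```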
@@ -58,22 +161,10 @@ fn create_sample_chat_completion_request() -> ChatCompletionRequest {
temperature: Some(0.7),
top_p: Some(1.0),
n: Some(1),
stream: false,
stream_options: None,
stop: None,
presence_penalty: Some(0.0),
frequency_penalty: Some(0.0),
logit_bias: None,
logprobs: false,
top_logprobs: None,
user: None,
response_format: None,
seed: None,
tools: None,
tool_choice: None,
parallel_tool_calls: Some(true),
function_call: None,
functions: None,
..default_chat_completion_request()
}
}
@@ -81,23 +172,14 @@ fn create_sample_completion_request() -> CompletionRequest {
CompletionRequest {
model: "text-davinci-003".to_string(),
prompt: StringOrArray::String("Complete this sentence: The future of AI is".to_string()),
suffix: None,
max_tokens: Some(50),
temperature: Some(0.8),
top_p: Some(1.0),
n: Some(1),
stream: false,
stream_options: None,
logprobs: None,
echo: false,
stop: None,
presence_penalty: Some(0.0),
frequency_penalty: Some(0.0),
best_of: Some(1),
logit_bias: None,
user: None,
seed: None,
other: serde_json::Map::new(),
..default_completion_request()
}
}
@@ -121,6 +203,7 @@ fn create_large_chat_completion_request() -> ChatCompletionRequest {
name: None,
tool_calls: None,
function_call: None,
reasoning_content: None,
});
}
@@ -132,22 +215,13 @@ fn create_large_chat_completion_request() -> ChatCompletionRequest {
temperature: Some(0.7),
top_p: Some(0.95),
n: Some(1),
stream: false,
stream_options: None,
stop: None,
presence_penalty: Some(0.1),
frequency_penalty: Some(0.1),
logit_bias: None,
logprobs: false,
top_logprobs: Some(5),
user: Some("benchmark_user".to_string()),
response_format: None,
seed: Some(42),
tools: None,
tool_choice: None,
parallel_tool_calls: Some(true),
function_call: None,
functions: None,
..default_chat_completion_request()
}
}
@@ -331,32 +405,17 @@ fn bench_throughput_by_size(c: &mut Criterion) {
// Create requests of different sizes
let small_generate = GenerateRequest {
text: Some("Hi".to_string()),
input_ids: None,
prompt: None,
parameters: None,
sampling_params: None,
stream: false,
return_logprob: false,
..default_generate_request()
};
let medium_generate = GenerateRequest {
text: Some("Write a medium length story about AI".repeat(10)),
input_ids: None,
prompt: None,
parameters: None,
sampling_params: None,
stream: false,
return_logprob: false,
..default_generate_request()
};
let large_generate = GenerateRequest {
text: Some("Write a very long and detailed story about artificial intelligence and its impact on society".repeat(100)),
input_ids: None,
prompt: None,
parameters: None,
sampling_params: None,
stream: false,
return_logprob: false,
..default_generate_request()
};
for (name, req) in [
@@ -6,6 +6,21 @@ use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
/// Helper function for serde default value
fn default_true() -> bool {
true
}
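The helper exists because serde's plain `#[serde(default)]` gives `false` for a missing bool, while `#[serde(default = "default_true")]` opts a field in unless the client disables it. A toy sketch of both behaviors, using a hypothetical `Opts` struct:

```rust
use serde::Deserialize;

fn default_true() -> bool {
    true
}

#[derive(Debug, Deserialize)]
struct Opts {
    #[serde(default)] // missing => false
    ignore_eos: bool,
    #[serde(default = "default_true")] // missing => true
    skip_special_tokens: bool,
}

fn main() {
    let opts: Opts = serde_json::from_str("{}").unwrap();
    assert!(!opts.ignore_eos);
    assert!(opts.skip_special_tokens);
}
```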
// ============= SGLang-Specific Types =============
/// LoRA adapter path - can be single path or batch of paths
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(untagged)]
pub enum LoRAPath {
Single(Option<String>),
Batch(Vec<Option<String>>),
}
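With `#[serde(untagged)]`, serde tries the variants in order, so a bare string (or JSON null) parses as `Single` while an array parses as `Batch`. A self-contained sketch of both accepted shapes (the adapter paths are invented):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(untagged)]
enum LoRAPath {
    Single(Option<String>),
    Batch(Vec<Option<String>>),
}

fn main() {
    // A bare string matches Single(Some(..)); null would match Single(None).
    let single: LoRAPath = serde_json::from_str(r#""adapters/my-lora""#).unwrap();
    // An array matches Batch, with nulls meaning "no adapter for this entry".
    let batch: LoRAPath = serde_json::from_str(r#"["a", null, "b"]"#).unwrap();
    println!("{:?} / {:?}", single, batch);
}
```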
/// Common trait for all generation requests
pub trait GenerationRequest: Send + Sync {
/// Check if the request is for streaming
@@ -92,6 +107,64 @@ pub struct CompletionRequest {
#[serde(skip_serializing_if = "Option::is_none")]
pub seed: Option<i64>,
// ============= SGLang Extensions =============
/// Top-k sampling parameter (-1 to disable)
#[serde(skip_serializing_if = "Option::is_none")]
pub top_k: Option<i32>,
/// Min-p sampling parameter (probability cutoff relative to the most likely token)
#[serde(skip_serializing_if = "Option::is_none")]
pub min_p: Option<f32>,
/// Minimum number of tokens to generate
#[serde(skip_serializing_if = "Option::is_none")]
pub min_tokens: Option<u32>,
/// Repetition penalty for reducing repetitive text
#[serde(skip_serializing_if = "Option::is_none")]
pub repetition_penalty: Option<f32>,
/// Regex constraint for output generation
#[serde(skip_serializing_if = "Option::is_none")]
pub regex: Option<String>,
/// EBNF grammar constraint for structured output
#[serde(skip_serializing_if = "Option::is_none")]
pub ebnf: Option<String>,
/// JSON schema constraint for structured output
#[serde(skip_serializing_if = "Option::is_none")]
pub json_schema: Option<String>,
/// Specific token IDs to use as stop conditions
#[serde(skip_serializing_if = "Option::is_none")]
pub stop_token_ids: Option<Vec<i32>>,
/// Skip trimming stop tokens from output
#[serde(default)]
pub no_stop_trim: bool,
/// Ignore end-of-sequence tokens during generation
#[serde(default)]
pub ignore_eos: bool,
/// Skip special tokens during detokenization
#[serde(default = "default_true")]
pub skip_special_tokens: bool,
// ============= SGLang Extensions =============
/// Path to LoRA adapter(s) for model customization
#[serde(skip_serializing_if = "Option::is_none")]
pub lora_path: Option<LoRAPath>,
/// Session parameters for continual prompting
#[serde(skip_serializing_if = "Option::is_none")]
pub session_params: Option<HashMap<String, serde_json::Value>>,
/// Return model hidden states
#[serde(default)]
pub return_hidden_states: bool,
/// Additional fields including bootstrap info for PD routing
#[serde(flatten)]
pub other: serde_json::Map<String, serde_json::Value>,
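The `#[serde(flatten)]` catch-all means any JSON key not declared on the struct survives deserialization in `other` instead of being rejected. A toy sketch, with a hypothetical `bootstrap_host` key standing in for PD-routing bootstrap info:

```rust
use serde::Deserialize;
use serde_json::{Map, Value};

#[derive(Debug, Deserialize)]
struct Req {
    model: String,
    // Undeclared keys are collected here rather than causing an error.
    #[serde(flatten)]
    other: Map<String, Value>,
}

fn main() {
    let req: Req =
        serde_json::from_str(r#"{"model": "m", "bootstrap_host": "10.0.0.1"}"#).unwrap();
    assert_eq!(req.model, "m");
    assert!(req.other.contains_key("bootstrap_host"));
}
```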
@@ -166,7 +239,7 @@ pub struct ChatCompletionRequest {
/// Modify the likelihood of specified tokens appearing in the completion
#[serde(skip_serializing_if = "Option::is_none")]
- pub logit_bias: Option<HashMap<String, i32>>,
+ pub logit_bias: Option<HashMap<String, f32>>,
/// A unique identifier representing your end-user
#[serde(skip_serializing_if = "Option::is_none")]
@@ -207,6 +280,72 @@ pub struct ChatCompletionRequest {
/// Deprecated: use tool_choice instead
#[serde(skip_serializing_if = "Option::is_none")]
pub function_call: Option<FunctionCall>,
// ============= SGLang Extensions =============
/// Top-k sampling parameter (-1 to disable)
#[serde(skip_serializing_if = "Option::is_none")]
pub top_k: Option<i32>,
/// Min-p sampling parameter (probability cutoff relative to the most likely token)
#[serde(skip_serializing_if = "Option::is_none")]
pub min_p: Option<f32>,
/// Minimum number of tokens to generate
#[serde(skip_serializing_if = "Option::is_none")]
pub min_tokens: Option<u32>,
/// Repetition penalty for reducing repetitive text
#[serde(skip_serializing_if = "Option::is_none")]
pub repetition_penalty: Option<f32>,
/// Regex constraint for output generation
#[serde(skip_serializing_if = "Option::is_none")]
pub regex: Option<String>,
/// EBNF grammar constraint for structured output
#[serde(skip_serializing_if = "Option::is_none")]
pub ebnf: Option<String>,
/// Specific token IDs to use as stop conditions
#[serde(skip_serializing_if = "Option::is_none")]
pub stop_token_ids: Option<Vec<i32>>,
/// Skip trimming stop tokens from output
#[serde(default)]
pub no_stop_trim: bool,
/// Ignore end-of-sequence tokens during generation
#[serde(default)]
pub ignore_eos: bool,
/// Continue generating from final assistant message
#[serde(default)]
pub continue_final_message: bool,
/// Skip special tokens during detokenization
#[serde(default = "default_true")]
pub skip_special_tokens: bool,
// ============= SGLang Extensions =============
/// Path to LoRA adapter(s) for model customization
#[serde(skip_serializing_if = "Option::is_none")]
pub lora_path: Option<LoRAPath>,
/// Session parameters for continual prompting
#[serde(skip_serializing_if = "Option::is_none")]
pub session_params: Option<HashMap<String, serde_json::Value>>,
/// Separate reasoning content from final answer (O1-style models)
#[serde(default = "default_true")]
pub separate_reasoning: bool,
/// Stream reasoning tokens during generation
#[serde(default = "default_true")]
pub stream_reasoning: bool,
/// Return model hidden states
#[serde(default)]
pub return_hidden_states: bool,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -234,6 +373,9 @@ pub enum ChatMessage {
tool_calls: Option<Vec<ToolCall>>,
#[serde(skip_serializing_if = "Option::is_none")]
function_call: Option<FunctionCallResponse>,
/// Reasoning content for O1-style models (SGLang extension)
#[serde(skip_serializing_if = "Option::is_none")]
reasoning_content: Option<String>,
},
Tool {
role: String, // "tool"
@@ -378,7 +520,20 @@ impl GenerationRequest for ChatCompletionRequest {
Some(texts.join(" "))
}
},
ChatMessage::Assistant { content, .. } => content.clone(),
ChatMessage::Assistant {
content,
reasoning_content,
..
} => {
// Combine content and reasoning content for routing decisions
let main_content = content.clone().unwrap_or_default();
let reasoning = reasoning_content.clone().unwrap_or_default();
if main_content.is_empty() && reasoning.is_empty() {
None
} else {
Some(format!("{} {}", main_content, reasoning).trim().to_string())
}
}
ChatMessage::Tool { content, .. } => Some(content.clone()),
ChatMessage::Function { content, .. } => Some(content.clone()),
})
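Restated as a free function (a hypothetical helper, for illustration only), the Assistant arm above joins content and reasoning for routing and returns `None` only when both are empty:

```rust
fn combine(content: Option<&str>, reasoning: Option<&str>) -> Option<String> {
    let main_content = content.unwrap_or_default();
    let reasoning = reasoning.unwrap_or_default();
    if main_content.is_empty() && reasoning.is_empty() {
        None
    } else {
        // trim() handles the cases where one side is empty.
        Some(format!("{} {}", main_content, reasoning).trim().to_string())
    }
}

fn main() {
    assert_eq!(combine(Some("42"), Some("thought")), Some("42 thought".into()));
    assert_eq!(combine(None, Some("thought")), Some("thought".into()));
    assert_eq!(combine(None, None), None);
}
```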
@@ -418,6 +573,23 @@ pub struct GenerateRequest {
/// Whether to return logprobs
#[serde(default)]
pub return_logprob: bool,
// ============= SGLang Extensions =============
/// Path to LoRA adapter(s) for model customization
#[serde(skip_serializing_if = "Option::is_none")]
pub lora_path: Option<LoRAPath>,
/// Session parameters for continual prompting
#[serde(skip_serializing_if = "Option::is_none")]
pub session_params: Option<HashMap<String, serde_json::Value>>,
/// Return model hidden states
#[serde(default)]
pub return_hidden_states: bool,
/// Request ID for tracking
#[serde(skip_serializing_if = "Option::is_none")]
pub rid: Option<String>,
}
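For illustration, a possible `/generate` request body exercising the new extension fields; the values, and the `session_params` keys, are invented for this sketch:

```rust
use serde_json::json;

fn main() {
    let body = json!({
        "text": "Hello",
        "lora_path": "adapters/my-lora",               // or an array for batch requests
        "session_params": { "session_id": "abc123" },  // free-form map
        "return_hidden_states": false,
        "rid": "req-42"                                // client-supplied tracking id
    });
    println!("{}", body);
}
```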
#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -485,6 +657,18 @@ pub struct SamplingParams {
pub skip_special_tokens: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub json_schema: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub regex: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ebnf: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub min_p: Option<f32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub min_tokens: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub stop_token_ids: Option<Vec<i32>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub no_stop_trim: Option<bool>,
}
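On the wire, the newly added `SamplingParams` fields might look like the payload below; every value is illustrative, not a recommended setting:

```rust
use serde_json::json;

fn main() {
    let sampling_params = json!({
        "min_p": 0.05,
        "min_tokens": 8,
        "regex": "[0-9]+",                  // constrain output to digits
        "stop_token_ids": [128001, 128009],
        "no_stop_trim": true
    });
    println!("{}", sampling_params);
}
```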
impl GenerationRequest for GenerateRequest {
@@ -561,6 +745,12 @@ pub struct CompletionChoice {
#[serde(skip_serializing_if = "Option::is_none")]
pub logprobs: Option<LogProbs>,
pub finish_reason: Option<String>, // "stop", "length", "content_filter", etc.
/// Information about which stop condition was matched
#[serde(skip_serializing_if = "Option::is_none")]
pub matched_stop: Option<serde_json::Value>, // Can be string or integer
/// Hidden states from the model (SGLang extension)
#[serde(skip_serializing_if = "Option::is_none")]
pub hidden_states: Option<Vec<f32>>,
}
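Because the matched stop condition can come back as either the stop string that fired or a stop token id, the field is typed as `serde_json::Value` rather than a dedicated enum. A toy sketch with a hypothetical `Choice` struct:

```rust
use serde::Deserialize;
use serde_json::Value;

#[derive(Debug, Deserialize)]
struct Choice {
    matched_stop: Option<Value>, // string or integer, depending on what matched
}

fn main() {
    let by_str: Choice = serde_json::from_str(r#"{"matched_stop": "</s>"}"#).unwrap();
    let by_id: Choice = serde_json::from_str(r#"{"matched_stop": 128009}"#).unwrap();
    assert!(by_str.matched_stop.unwrap().is_string());
    assert!(by_id.matched_stop.unwrap().is_number());
}
```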
#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -591,6 +781,12 @@ pub struct ChatChoice {
#[serde(skip_serializing_if = "Option::is_none")]
pub logprobs: Option<ChatLogProbs>,
pub finish_reason: Option<String>, // "stop", "length", "tool_calls", "content_filter", "function_call"
/// Information about which stop condition was matched
#[serde(skip_serializing_if = "Option::is_none")]
pub matched_stop: Option<serde_json::Value>, // Can be string or integer
/// Hidden states from the model (SGLang extension)
#[serde(skip_serializing_if = "Option::is_none")]
pub hidden_states: Option<Vec<f32>>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -681,6 +877,9 @@ pub struct ChatMessageDelta {
pub tool_calls: Option<Vec<ToolCallDelta>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub function_call: Option<FunctionCallDelta>,
/// Reasoning content delta for O1-style models (SGLang extension)
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_content: Option<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -278,11 +278,11 @@ mod bootstrap_tests {
use crate::core::BasicWorker;
use crate::openai_api_types::StringOrArray;
- #[test]
- fn test_completion_batch_size_with_array_prompt() {
- let req = CompletionRequest {
- model: "test".to_string(),
- prompt: StringOrArray::Array(vec!["prompt1".to_string(), "prompt2".to_string()]),
+ /// Create a default CompletionRequest for testing with minimal fields set
+ fn default_completion_request() -> CompletionRequest {
+ CompletionRequest {
+ model: String::new(),
+ prompt: StringOrArray::String(String::new()),
n: None,
other: serde_json::Map::new(),
suffix: None,
@@ -300,6 +300,31 @@ mod bootstrap_tests {
logit_bias: None,
user: None,
seed: None,
// SGLang Extensions
top_k: None,
min_p: None,
min_tokens: None,
repetition_penalty: None,
regex: None,
ebnf: None,
json_schema: None,
stop_token_ids: None,
no_stop_trim: false,
ignore_eos: false,
skip_special_tokens: true,
// SGLang Extensions
lora_path: None,
session_params: None,
return_hidden_states: false,
}
}
#[test]
fn test_completion_batch_size_with_array_prompt() {
let req = CompletionRequest {
model: "test".to_string(),
prompt: StringOrArray::Array(vec!["prompt1".to_string(), "prompt2".to_string()]),
..default_completion_request()
};
// Should return batch size for array prompt
@@ -311,23 +336,7 @@ mod bootstrap_tests {
let req = CompletionRequest {
model: "test".to_string(),
prompt: StringOrArray::String("single prompt".to_string()),
n: None,
other: serde_json::Map::new(),
suffix: None,
max_tokens: None,
temperature: None,
top_p: None,
stream: false,
stream_options: None,
logprobs: None,
echo: false,
stop: None,
presence_penalty: None,
frequency_penalty: None,
best_of: None,
logit_bias: None,
user: None,
seed: None,
..default_completion_request()
};
// Should return None for single prompt
@@ -340,22 +349,7 @@
model: "test".to_string(),
prompt: StringOrArray::String("single prompt".to_string()),
n: Some(3),
other: serde_json::Map::new(),
suffix: None,
max_tokens: None,
temperature: None,
top_p: None,
stream: false,
stream_options: None,
logprobs: None,
echo: false,
stop: None,
presence_penalty: None,
frequency_penalty: None,
best_of: None,
logit_bias: None,
user: None,
seed: None,
..default_completion_request()
};
// Should return None for single string prompt, even with n > 1
@@ -368,23 +362,7 @@
let mut req = CompletionRequest {
model: "test".to_string(),
prompt: StringOrArray::Array(vec!["prompt1".to_string(), "prompt2".to_string()]),
n: None,
other: serde_json::Map::new(),
suffix: None,
max_tokens: None,
temperature: None,
top_p: None,
stream: false,
stream_options: None,
logprobs: None,
echo: false,
stop: None,
presence_penalty: None,
frequency_penalty: None,
best_of: None,
logit_bias: None,
user: None,
seed: None,
..default_completion_request()
};
// Set bootstrap info - should always use single values
@@ -418,23 +396,7 @@
let mut req = CompletionRequest {
model: "test".to_string(),
prompt: StringOrArray::Array(vec!["prompt1".to_string(), "prompt2".to_string()]),
n: None,
other: serde_json::Map::new(),
suffix: None,
max_tokens: None,
temperature: None,
top_p: None,
stream: false,
stream_options: None,
logprobs: None,
echo: false,
stop: None,
presence_penalty: None,
frequency_penalty: None,
best_of: None,
logit_bias: None,
user: None,
seed: None,
..default_completion_request()
};
// Set bootstrap info with arrays
@@ -8,14 +8,118 @@ use sglang_router_rs::openai_api_types::{
};
use sglang_router_rs::routers::request_adapter::{RouteableRequest, ToPdRequest};
/// Create a default GenerateRequest for benchmarks with minimal fields set
fn default_generate_request() -> GenerateRequest {
GenerateRequest {
text: None,
prompt: None,
input_ids: None,
stream: false,
parameters: None,
sampling_params: None,
return_logprob: false,
// SGLang Extensions
lora_path: None,
session_params: None,
return_hidden_states: false,
rid: None,
}
}
/// Create a default ChatCompletionRequest for benchmarks with minimal fields set
fn default_chat_completion_request() -> ChatCompletionRequest {
ChatCompletionRequest {
model: String::new(),
messages: vec![],
max_tokens: None,
max_completion_tokens: None,
temperature: None,
top_p: None,
n: None,
stream: false,
stream_options: None,
stop: None,
presence_penalty: None,
frequency_penalty: None,
logit_bias: None,
logprobs: false,
top_logprobs: None,
user: None,
response_format: None,
seed: None,
tools: None,
tool_choice: None,
parallel_tool_calls: None,
function_call: None,
functions: None,
// SGLang Extensions
top_k: None,
min_p: None,
min_tokens: None,
repetition_penalty: None,
regex: None,
ebnf: None,
stop_token_ids: None,
no_stop_trim: false,
ignore_eos: false,
continue_final_message: false,
skip_special_tokens: true,
// SGLang Extensions
lora_path: None,
session_params: None,
separate_reasoning: true,
stream_reasoning: true,
return_hidden_states: false,
}
}
/// Create a default CompletionRequest for benchmarks with minimal fields set
fn default_completion_request() -> CompletionRequest {
CompletionRequest {
model: String::new(),
prompt: StringOrArray::String(String::new()),
suffix: None,
max_tokens: None,
temperature: None,
top_p: None,
n: None,
stream: false,
stream_options: None,
logprobs: None,
echo: false,
stop: None,
presence_penalty: None,
frequency_penalty: None,
best_of: None,
logit_bias: None,
user: None,
seed: None,
// SGLang Extensions
top_k: None,
min_p: None,
min_tokens: None,
repetition_penalty: None,
regex: None,
ebnf: None,
json_schema: None,
stop_token_ids: None,
no_stop_trim: false,
ignore_eos: false,
skip_special_tokens: true,
// SGLang Extensions
lora_path: None,
session_params: None,
return_hidden_states: false,
other: serde_json::Map::new(),
}
}
#[test]
fn test_benchmark_request_creation() {
// Ensure all benchmark request types can be created without panicking
let generate_req = GenerateRequest {
text: Some("Test prompt".to_string()),
input_ids: None,
prompt: None,
parameters: Some(GenerateParameters {
max_new_tokens: Some(100),
temperature: Some(0.8),
@@ -33,8 +137,7 @@ fn test_benchmark_request_creation() {
repetition_penalty: Some(1.0),
..Default::default()
}),
stream: false,
return_logprob: false,
..default_generate_request()
};
let chat_req = ChatCompletionRequest {
@@ -49,44 +152,23 @@
temperature: Some(0.7),
top_p: Some(1.0),
n: Some(1),
stream: false,
stream_options: None,
stop: None,
presence_penalty: Some(0.0),
frequency_penalty: Some(0.0),
logit_bias: None,
logprobs: false,
top_logprobs: None,
user: None,
response_format: None,
seed: None,
tools: None,
tool_choice: None,
parallel_tool_calls: Some(true),
function_call: None,
functions: None,
..default_chat_completion_request()
};
let completion_req = CompletionRequest {
model: "test-model".to_string(),
prompt: StringOrArray::String("Test prompt".to_string()),
suffix: None,
max_tokens: Some(50),
temperature: Some(0.8),
top_p: Some(1.0),
n: Some(1),
stream: false,
stream_options: None,
logprobs: None,
echo: false,
stop: None,
presence_penalty: Some(0.0),
frequency_penalty: Some(0.0),
best_of: Some(1),
logit_bias: None,
user: None,
seed: None,
other: serde_json::Map::new(),
..default_completion_request()
};
// Test serialization works
@@ -101,12 +183,7 @@ fn test_benchmark_serialization_roundtrip() {
let generate_req = GenerateRequest {
text: Some("Test prompt".to_string()),
input_ids: None,
prompt: None,
parameters: None,
sampling_params: None,
stream: false,
return_logprob: false,
..default_generate_request()
};
// Serialize and deserialize
@@ -125,12 +202,7 @@ fn test_benchmark_request_adaptation() {
let generate_req = GenerateRequest {
text: Some("Test prompt".to_string()),
input_ids: None,
prompt: None,
parameters: None,
sampling_params: None,
stream: false,
return_logprob: false,
..default_generate_request()
};
let chat_req = ChatCompletionRequest {
@@ -145,44 +217,23 @@
temperature: Some(0.7),
top_p: Some(1.0),
n: Some(1),
stream: false,
stream_options: None,
stop: None,
presence_penalty: Some(0.0),
frequency_penalty: Some(0.0),
logit_bias: None,
logprobs: false,
top_logprobs: None,
user: None,
response_format: None,
seed: None,
tools: None,
tool_choice: None,
parallel_tool_calls: Some(true),
function_call: None,
functions: None,
..default_chat_completion_request()
};
let completion_req = CompletionRequest {
model: "test-model".to_string(),
prompt: StringOrArray::String("Test prompt".to_string()),
suffix: None,
max_tokens: Some(50),
temperature: Some(0.8),
top_p: Some(1.0),
n: Some(1),
stream: false,
stream_options: None,
logprobs: None,
echo: false,
stop: None,
presence_penalty: Some(0.0),
frequency_penalty: Some(0.0),
best_of: Some(1),
logit_bias: None,
user: None,
seed: None,
other: serde_json::Map::new(),
..default_completion_request()
};
// Test PD adaptation (should not panic)
@@ -197,12 +248,7 @@ fn test_benchmark_regular_routing() {
let generate_req = GenerateRequest {
text: Some("Test prompt".to_string()),
input_ids: None,
prompt: None,
parameters: None,
sampling_params: None,
stream: false,
return_logprob: false,
..default_generate_request()
};
// Test regular routing methods (should not panic)
@@ -217,12 +263,7 @@ fn test_benchmark_performance_baseline() {
let generate_req = GenerateRequest {
text: Some("Short test prompt".to_string()),
input_ids: None,
prompt: None,
parameters: None,
sampling_params: None,
stream: false,
return_logprob: false,
..default_generate_request()
};
// Serialization should be fast (< 1ms for simple requests)